fix ansible lint separate commits #684

Merged (15 commits) on Oct 27, 2022
5 changes: 5 additions & 0 deletions .github/workflows/build.yml
@@ -15,8 +15,10 @@ jobs:
include:
- name: centos-stream-8
container-name: el8stream
pip-command: pip3.8
- name: centos-stream-9
container-name: el9stream
pip-command: pip3

name: ${{ matrix.name }}

@@ -27,6 +29,9 @@
image: quay.io/ovirt/buildcontainer:${{ matrix.container-name }}

steps:
- name: Install required PyPI packages
run: ${{ matrix.pip-command }} install "ansible-lint>=6.0.0,<7.0.0"

- name: Checkout sources
uses: ovirt/checkout-action@main

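Pieced together, the matrix entries and the new lint-install step in .github/workflows/build.yml read roughly as follows. This is a sketch for orientation only: the job id, the runs-on value, and any fields not visible in the diff above are assumptions.

```yaml
jobs:
  build:                     # job id assumed; not visible in the diff
    runs-on: ubuntu-latest   # assumed runner
    strategy:
      matrix:
        include:
          - name: centos-stream-8
            container-name: el8stream
            pip-command: pip3.8   # EL8 container uses the python3.8 pip binary (per the diff)
          - name: centos-stream-9
            container-name: el9stream
            pip-command: pip3
    name: ${{ matrix.name }}
    container:
      image: quay.io/ovirt/buildcontainer:${{ matrix.container-name }}
    steps:
      # ansible-lint is pinned to the 6.x series so CI results stay reproducible
      - name: Install required PyPI packages
        run: ${{ matrix.pip-command }} install "ansible-lint>=6.0.0,<7.0.0"
      - name: Checkout sources
        uses: ovirt/checkout-action@main
```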
25 changes: 5 additions & 20 deletions build/ansible-check.sh
@@ -1,27 +1,12 @@
#!/bin/sh -x

# Search for playbooks within specified directories (one level only)
PLABOOKS_DIR="packaging/ansible-runner-service-project/project"

# Directory with roles
ROLES_DIR="packaging/ansible-runner-service-project/project/roles"

SRCDIR="$(dirname "$0")/.."

ANSIBLE_LINT=/usr/bin/ansible-lint
ANSIBLE_LINT_CONF="$(dirname "$0")/ansible-lint.conf"

if ! which "${ANSIBLE_LINT}" > /dev/null 2>&1; then
echo "WARNING: tool '${ANSIBLE_LINT}' is missing" >&2
# Check if the ansible-lint binary exists
if ! command -v ansible-lint > /dev/null 2>&1; then
echo "WARNING: tool 'ansible-lint' is missing" >&2
exit 0
fi

cd "${SRCDIR}"

# Find playbooks
PARAMS=$(find ${PLABOOKS_DIR} -type f -name '*.yml' -maxdepth 1)

# Find roles
PARAMS="$PARAMS $(find ${ROLES_DIR} -type d -maxdepth 1)"

${ANSIBLE_LINT} -c ${ANSIBLE_LINT_CONF} ${PARAMS}
# Run ansible-lint
ansible-lint -c ${ANSIBLE_LINT_CONF} packaging/ansible-runner-service-project/project/roles/*
11 changes: 8 additions & 3 deletions build/ansible-lint.conf
@@ -1,5 +1,10 @@
skip_list:
# [E701]: "meta/main.yml should contain relevant info"
# meta-no-info: "meta/main.yml should contain relevant info"
# Roles in ovirt-engine are not intended to be used/imported by Ansible Galaxy
- '701'

- 'meta-no-info'
# role-name: Role name does not match ``^[a-z][a-z0-9_]*$`` pattern.
- 'role-name'
# name: All tasks should be named. (name[missing])
- 'name'
# no-changed-when: Commands should not change things if nothing needs doing.
- 'no-changed-when'
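Alongside this trimmed skip list, the role diffs below handle lint findings directly in the tasks: command invocations register their result and set explicit failed_when/changed_when conditions, so known "already exists" return codes count as unchanged rather than failed. A minimal sketch of that pattern, with purely illustrative names:

```yaml
# Illustrative only -- task name, command, and variables are not taken from this PR.
- name: Example | Create a volume group only when needed
  ansible.builtin.command: "vgcreate {{ vgname }} {{ disks | join(' ') }}"
  register: resp
  failed_when: resp.rc not in [0, 3]   # rc 3 = volume group already exists
  changed_when: resp.rc == 0           # only a clean run counts as a change
```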
@@ -9,8 +9,6 @@

pre_tasks:
- include_vars: ovirt_host_upgrade_vars.yml
tags:
- skip_ansible_lint # E502

- include: ovirt-host-yum-conf.yml

@@ -1,27 +1,27 @@
---
# rc 5 = Physical volume '/dev/name' is already in volume group
- name: Setup SSD for caching | Extend the Volume Group
command: "vgextend --dataalignment 256K {{ vgname }} {{ ssd }}"
ansible.builtin.command: "vgextend --dataalignment 256K {{ vgname }} {{ ssd }}"
register: resp
failed_when: resp.rc not in [0, 5]
changed_when: resp.rc == 0

# rc 5 = Physical volume '/dev/name' is already in volume group
- name: Setup SSD for caching | Create LV for cache
command: "lvcreate -L {{ cache_lvsize }} -n {{ cache_lvname }} {{ vgname }}"
ansible.builtin.command: "lvcreate -L {{ cache_lvsize }} -n {{ cache_lvname }} {{ vgname }}"
register: resp
failed_when: resp.rc not in [0, 5]
changed_when: resp.rc == 0

- name: Setup SSD for caching | Create metadata LV for cache
command: "lvcreate -L {{ cache_meta_lvsize }} -n {{ cache_meta_lv }} {{ vgname }}"
ansible.builtin.command: "lvcreate -L {{ cache_meta_lvsize }} -n {{ cache_meta_lv }} {{ vgname }}"
when: cache_meta_lv is defined and cache_meta_lv != ' '
register: resp
failed_when: resp.rc not in [0, 5]
changed_when: resp.rc == 0

- name: Setup SSD for caching | Convert logical volume to a cache pool LV
command: >
ansible.builtin.command: >
lvconvert -y --type cache-pool --poolmetadata {{ cache_meta_lv }}
--poolmetadataspare n
--cachemode {{ cachemode | default('writethrough') }}
@@ -35,7 +35,7 @@
# It is valid not to have cachemetalvname! Writing a separate task not to
# complicate things.
- name: Setup SSD for caching | Convert logical volume to a cache pool LV without cachemetalvname
command: >
ansible.builtin.command: >
lvconvert -y --type cache-pool
--poolmetadataspare n
--cachemode {{ cachemode | default('writethrough') }}
@@ -47,11 +47,9 @@

# Run lvs -a -o +devices to see the cache settings
- name: Setup SSD for caching | Convert an existing logical volume to a cache LV
command: >
ansible.builtin.command: >
lvconvert -y --type cache --cachepool "/dev/{{ vgname }}/{{ cache_lvname }}"
"/dev/{{ vgname }}/{{ lvname }}_pool"
register: resp
failed_when: resp.rc not in [0, 5]
changed_when: resp.rc == 0
tags:
- skip_ansible_lint
@@ -1,7 +1,7 @@
---
# rc 1 = Device or resource busy
- name: Clean up filesystem signature
command: wipefs -a {{ item }}
ansible.builtin.command: wipefs -a {{ item }}
with_items: "{{ disks | default([]) }}"
when: wipefs == 'yes' and item is defined
register: resp
@@ -11,25 +11,25 @@
# needed if we can always assume 256K for JBOD, however we provide this extra
# variable to override it.
- name: Set PV data alignment for JBOD
set_fact:
ansible.builtin.set_fact:
pv_dataalign: "{{ gluster_infra_dalign | default('256K') }}"
when: disktype|upper in ['NONE', 'RAID0']
when: disktype | upper in ['NONE', 'RAID0']

# Set data alignment for RAID
# We need KiB: ensure to keep the trailing `K' in the pv_dataalign calculation.
- name: Set PV data alignment for RAID
set_fact:
pv_dataalign: "{{ diskcount|int * stripesize|int }}K"
when: disktype|upper in ['RAID6', 'RAID10']
ansible.builtin.set_fact:
pv_dataalign: "{{ diskcount | int * stripesize | int }}K"
when: disktype | upper in ['RAID6', 'RAID10']

- name: Set VG physical extent size for RAID
set_fact:
vg_pesize: "{{ diskcount|int * stripesize|int }}K"
when: disktype|upper in ['RAID6', 'RAID10']
ansible.builtin.set_fact:
vg_pesize: "{{ diskcount | int * stripesize | int }}K"
when: disktype | upper in ['RAID6', 'RAID10']

# rc 3 = already exists in filesystem
- name: Create volume groups
command: "vgcreate --dataalignment {{ pv_dataalign }} -s {{ vg_pesize | default(4) }} {{ vgname }} {{ disks | join(' ') }}"
ansible.builtin.command: "vgcreate --dataalignment {{ pv_dataalign }} -s {{ vg_pesize | default(4) }} {{ vgname }} {{ disks | join(' ') }}"
register: resp
failed_when: resp.rc not in [0, 3]
changed_when: resp.rc == 0
@@ -42,54 +42,59 @@
# to full_stripe_size
#
- name: Calculate chunksize for RAID6/RAID10
set_fact:
lv_chunksize: "{{ stripesize|int * diskcount|int }}K"
when: disktype|upper in ['RAID6', 'RAID10']
ansible.builtin.set_fact:
lv_chunksize: "{{ stripesize | int * diskcount | int }}K"
when: disktype | upper in ['RAID6', 'RAID10']

# For JBOD the thin pool chunk size is set to 256 KiB.
- name: Set chunksize for JBOD
set_fact:
ansible.builtin.set_fact:
lv_chunksize: '256K'
when: disktype|upper in ['NONE', 'RAID0']
when: disktype | upper in ['NONE', 'RAID0']

# rc 5 = Logical Volume 'name' already exists in volume group.
- name: Create a LV thinpool
command: "lvcreate -l 100%FREE --chunksize {{ lv_chunksize }} --poolmetadatasize {{ pool_metadatasize }} --zero n --type thin-pool --thinpool {{ lvname }}_pool {{ vgname }}"
ansible.builtin.command: >
"lvcreate -l 100%FREE --chunksize {{ lv_chunksize }} --poolmetadatasize {{ pool_metadatasize }} --zero n
--type thin-pool --thinpool {{ lvname }}_pool {{ vgname }}"
register: resp
failed_when: resp.rc not in [0, 5]
changed_when: resp.rc == 0

# rc 5 = Logical Volume 'name' already exists in volume group.
- name: Create thin logical volume
command: "lvcreate -T {{ vgname }}/{{ lvname }}_pool -V {{ size }} -n {{ lvname }}"
ansible.builtin.command: "lvcreate -T {{ vgname }}/{{ lvname }}_pool -V {{ size }} -n {{ lvname }}"
register: resp
failed_when: resp.rc not in [0, 5]
changed_when: resp.rc == 0

- include_tasks: lvmcache.yml
- ansible.builtin.include_tasks: lvmcache.yml
when: ssd is defined and ssd

# rc 1 = Filesystem already exists
- name: Create an xfs filesystem
command: "mkfs.xfs -f -K -i size=512 -n size=8192 {% if 'raid' in disktype %} -d sw={{ diskcount }},su={{ stripesize }}k {% endif %} /dev/{{ vgname }}/{{ lvname }}"
ansible.builtin.command: >
"mkfs.xfs -f -K -i size=512 -n size=8192 {% if 'raid' in disktype %} -d sw={{ diskcount }},su={{ stripesize }}k
{% endif %} /dev/{{ vgname }}/{{ lvname }}"
register: resp
failed_when: resp.rc not in [0, 1]
changed_when: resp.rc == 0

- name: Create the backend directory, skips if present
file:
ansible.builtin.file:
path: "{{ mntpath }}"
state: directory
mode: 0755

- name: Mount the brick
mount:
ansible.posix.mount:
name: "{{ mntpath }}"
src: "/dev/{{ vgname }}/{{ lvname }}"
fstype: "{{ fstype }}"
opts: "inode64,noatime,nodiratime"
state: mounted

- name: Set SELinux labels on the bricks
command: "chcon -t glusterd_brick_t {{ mntpath }}"
ansible.builtin.command: "chcon -t glusterd_brick_t {{ mntpath }}"
register: resp
changed_when: resp.rc == 0
@@ -1,2 +1,2 @@
---
# defaults file for replace_node
# defaults file for replace_node
@@ -1,10 +1,10 @@
- name: Make sure authorized_keys file is present
stat:
ansible.builtin.stat:
path: "/root/.ssh/authorized_keys"
register: authkey

- name: Copy the authorized_keys from the active host to the new host
synchronize:
ansible.posix.synchronize:
src: "/root/.ssh/authorized_keys"
dest: "/root/.ssh/authorized_keys"
mode: pull
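The module renames in these hunks switch to fully qualified collection names: built-in modules move under ansible.builtin.*, while mount and synchronize come from the ansible.posix collection. If that collection is not already present in the build environment, it can be declared in a Galaxy requirements file; the file below is a hypothetical sketch and is not part of this PR.

```yaml
# requirements.yml -- hypothetical, not included in this PR
collections:
  - name: ansible.posix   # provides the mount and synchronize modules used above
```

It would then be installed with `ansible-galaxy collection install -r requirements.yml` before running the playbooks.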
@@ -2,62 +2,62 @@
# Peer restoration
# Create tmp dir for storing peer data
- name: Create temporary storage directory
tempfile:
ansible.builtin.tempfile:
state: directory
suffix: _peer
register: tmpdir
delegate_to: localhost
run_once: True
run_once: true

# Set the glusterd location
- name: Set the path of glusterd.info file
set_fact:
ansible.builtin.set_fact:
glusterd_libdir: "/var/lib/glusterd"
peer_tmp_dir: "{{ tmpdir['path'] }}"

- name: Import Tasks
block:

- name: Include pre-task in play
import_tasks: pre-task.yml
ansible.builtin.import_tasks: pre-task.yml

- name: Include authorization task in play
import_tasks: authorization.yml
ansible.builtin.import_tasks: authorization.yml

- name: Include peers reconfiguration task in play
import_tasks: peers.yml
ansible.builtin.import_tasks: peers.yml

- name: Include volume reconfiguration task in play
import_tasks: volume.yml
ansible.builtin.import_tasks: volume.yml

when: gluster_maintenance_old_node is defined and
gluster_maintenance_cluster_node is defined and
gluster_maintenance_cluster_node_2 is defined

# Detach the old host, to replace host with different FQDN usecase
- name: Detach the peer, in the case of different host replacement
command: "gluster peer detach {{ gluster_maintenance_old_node }} force"
ansible.builtin.command: "gluster peer detach {{ gluster_maintenance_old_node }} force"
when: gluster_maintenance_old_node != gluster_maintenance_new_node

- name: Force removal of old node peer in new node
file:
ansible.builtin.file:
path: "{{ glusterd_libdir }}/peers/{{ old_node_uuid.stdout | trim }}"
state: absent
when: gluster_maintenance_old_node != gluster_maintenance_new_node
delegate_to : "{{ gluster_maintenance_new_node }}"
delegate_to: "{{ gluster_maintenance_new_node }}"
connection: ssh

- name: Restart glusterd on the new node
connection: ssh
service:
ansible.builtin.service:
name: glusterd
state: restarted
delegate_to: "{{ gluster_new_node }}"

# Ensure to delete the temporary directory
- name: Delete the temporary directory
file:
ansible.builtin.file:
state: absent
path: "{{ peer_tmp_dir }}"
delegate_to: localhost
run_once: True
run_once: true