diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 63c96ef70fa..b0b5ca2e5e5 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -15,8 +15,10 @@ jobs: include: - name: centos-stream-8 container-name: el8stream + pip-command: pip3.8 - name: centos-stream-9 container-name: el9stream + pip-command: pip3 name: ${{ matrix.name }} @@ -27,6 +29,9 @@ jobs: image: quay.io/ovirt/buildcontainer:${{ matrix.container-name }} steps: + - name: Install required PyPI packages + run: ${{ matrix.pip-command }} install "ansible-lint>=6.0.0,<7.0.0" + - name: Checkout sources uses: ovirt/checkout-action@main diff --git a/build/ansible-check.sh b/build/ansible-check.sh index b1b97899e06..f90203433d5 100755 --- a/build/ansible-check.sh +++ b/build/ansible-check.sh @@ -1,27 +1,12 @@ #!/bin/sh -x -# Search for playbooks within specified directories (one level only) -PLABOOKS_DIR="packaging/ansible-runner-service-project/project" - -# Directory with roles -ROLES_DIR="packaging/ansible-runner-service-project/project/roles" - -SRCDIR="$(dirname "$0")/.." - -ANSIBLE_LINT=/usr/bin/ansible-lint ANSIBLE_LINT_CONF="$(dirname "$0")/ansible-lint.conf" -if ! which "${ANSIBLE_LINT}" > /dev/null 2>&1; then - echo "WARNING: tool '${ANSIBLE_LINT}' is missing" >&2 +# Check if the ansible-lint binary exists +if ! 
command -v ansible-lint > /dev/null 2>&1; then + echo "WARNING: tool 'ansible-lint' is missing" >&2 exit 0 fi -cd "${SRCDIR}" - -# Find playbooks -PARAMS=$(find ${PLABOOKS_DIR} -type f -name '*.yml' -maxdepth 1) - -# Find roles -PARAMS="$PARAMS $(find ${ROLES_DIR} -type d -maxdepth 1)" - -${ANSIBLE_LINT} -c ${ANSIBLE_LINT_CONF} ${PARAMS} +# Run ansible-lint +ansible-lint -c ${ANSIBLE_LINT_CONF} packaging/ansible-runner-service-project/project/roles/* diff --git a/build/ansible-lint.conf b/build/ansible-lint.conf index 1aecc713912..d183749d87a 100644 --- a/build/ansible-lint.conf +++ b/build/ansible-lint.conf @@ -1,5 +1,10 @@ skip_list: - # [E701]: "meta/main.yml should contain relevant info" + # meta-no-info: "meta/main.yml should contain relevant info" # Roles in ovirt-engine are not intended to be used/imported by Ansible Galaxy - - '701' - + - 'meta-no-info' + # role-name: Role name does not match ``^[a-z][a-z0-9_]*$`` pattern. + - 'role-name' + # name: All tasks should be named. (name[missing]) + - 'name' + # no-changed-when: Commands should not change things if nothing needs doing. 
+ - 'no-changed-when' diff --git a/packaging/ansible-runner-service-project/project/ovirt-host-upgrade.yml b/packaging/ansible-runner-service-project/project/ovirt-host-upgrade.yml index d2f518027e6..79ebe89a427 100644 --- a/packaging/ansible-runner-service-project/project/ovirt-host-upgrade.yml +++ b/packaging/ansible-runner-service-project/project/ovirt-host-upgrade.yml @@ -9,8 +9,6 @@ pre_tasks: - include_vars: ovirt_host_upgrade_vars.yml - tags: - - skip_ansible_lint # E502 - include: ovirt-host-yum-conf.yml diff --git a/packaging/ansible-runner-service-project/project/roles/gluster-brick-create/tasks/lvmcache.yml b/packaging/ansible-runner-service-project/project/roles/gluster-brick-create/tasks/lvmcache.yml index 96de86de350..6e475c86513 100644 --- a/packaging/ansible-runner-service-project/project/roles/gluster-brick-create/tasks/lvmcache.yml +++ b/packaging/ansible-runner-service-project/project/roles/gluster-brick-create/tasks/lvmcache.yml @@ -1,27 +1,27 @@ --- # rc 5 = Physical volume '/dev/name' is already in volume group - name: Setup SSD for caching | Extend the Volume Group - command: "vgextend --dataalignment 256K {{ vgname }} {{ ssd }}" + ansible.builtin.command: "vgextend --dataalignment 256K {{ vgname }} {{ ssd }}" register: resp failed_when: resp.rc not in [0, 5] changed_when: resp.rc == 0 # rc 5 = Physical volume '/dev/name' is already in volume group - name: Setup SSD for caching | Create LV for cache - command: "lvcreate -L {{ cache_lvsize }} -n {{ cache_lvname }} {{ vgname }}" + ansible.builtin.command: "lvcreate -L {{ cache_lvsize }} -n {{ cache_lvname }} {{ vgname }}" register: resp failed_when: resp.rc not in [0, 5] changed_when: resp.rc == 0 - name: Setup SSD for caching | Create metadata LV for cache - command: "lvcreate -L {{ cache_meta_lvsize }} -n {{ cache_meta_lv }} {{ vgname }}" + ansible.builtin.command: "lvcreate -L {{ cache_meta_lvsize }} -n {{ cache_meta_lv }} {{ vgname }}" when: cache_meta_lv is defined and cache_meta_lv != ' ' 
register: resp failed_when: resp.rc not in [0, 5] changed_when: resp.rc == 0 - name: Setup SSD for caching | Convert logical volume to a cache pool LV - command: > + ansible.builtin.command: > lvconvert -y --type cache-pool --poolmetadata {{ cache_meta_lv }} --poolmetadataspare n --cachemode {{ cachemode | default('writethrough') }} @@ -35,7 +35,7 @@ # It is valid not to have cachemetalvname! Writing a separate task not to # complicate things. - name: Setup SSD for caching | Convert logical volume to a cache pool LV without cachemetalvname - command: > + ansible.builtin.command: > lvconvert -y --type cache-pool --poolmetadataspare n --cachemode {{ cachemode | default('writethrough') }} @@ -47,11 +47,9 @@ # Run lvs -a -o +devices to see the cache settings - name: Setup SSD for caching | Convert an existing logical volume to a cache LV - command: > + ansible.builtin.command: > lvconvert -y --type cache --cachepool "/dev/{{ vgname }}/{{ cache_lvname }}" "/dev/{{ vgname }}/{{ lvname }}_pool" register: resp failed_when: resp.rc not in [0, 5] changed_when: resp.rc == 0 - tags: - - skip_ansible_lint diff --git a/packaging/ansible-runner-service-project/project/roles/gluster-brick-create/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/gluster-brick-create/tasks/main.yml index 495f8af3d45..659a32693b9 100644 --- a/packaging/ansible-runner-service-project/project/roles/gluster-brick-create/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/gluster-brick-create/tasks/main.yml @@ -1,7 +1,7 @@ --- # rc 1 = Device or resource busy - name: Clean up filesystem signature - command: wipefs -a {{ item }} + ansible.builtin.command: wipefs -a {{ item }} with_items: "{{ disks | default([]) }}" when: wipefs == 'yes' and item is defined register: resp @@ -11,25 +11,25 @@ # needed if we can always assume 256K for JBOD, however we provide this extra # variable to override it. 
- name: Set PV data alignment for JBOD - set_fact: + ansible.builtin.set_fact: pv_dataalign: "{{ gluster_infra_dalign | default('256K') }}" - when: disktype|upper in ['NONE', 'RAID0'] + when: disktype | upper in ['NONE', 'RAID0'] # Set data alignment for RAID # We need KiB: ensure to keep the trailing `K' in the pv_dataalign calculation. - name: Set PV data alignment for RAID - set_fact: - pv_dataalign: "{{ diskcount|int * stripesize|int }}K" - when: disktype|upper in ['RAID6', 'RAID10'] + ansible.builtin.set_fact: + pv_dataalign: "{{ diskcount | int * stripesize | int }}K" + when: disktype | upper in ['RAID6', 'RAID10'] - name: Set VG physical extent size for RAID - set_fact: - vg_pesize: "{{ diskcount|int * stripesize|int }}K" - when: disktype|upper in ['RAID6', 'RAID10'] + ansible.builtin.set_fact: + vg_pesize: "{{ diskcount | int * stripesize | int }}K" + when: disktype | upper in ['RAID6', 'RAID10'] # rc 3 = already exists in filesystem - name: Create volume groups - command: "vgcreate --dataalignment {{ pv_dataalign }} -s {{ vg_pesize | default(4) }} {{ vgname }} {{ disks | join(' ') }}" + ansible.builtin.command: "vgcreate --dataalignment {{ pv_dataalign }} -s {{ vg_pesize | default(4) }} {{ vgname }} {{ disks | join(' ') }}" register: resp failed_when: resp.rc not in [0, 3] changed_when: resp.rc == 0 @@ -42,47 +42,52 @@ # to full_stripe_size # - name: Calculate chunksize for RAID6/RAID10 - set_fact: - lv_chunksize: "{{ stripesize|int * diskcount|int }}K" - when: disktype|upper in ['RAID6', 'RAID10'] + ansible.builtin.set_fact: + lv_chunksize: "{{ stripesize | int * diskcount | int }}K" + when: disktype | upper in ['RAID6', 'RAID10'] # For JBOD the thin pool chunk size is set to 256 KiB. - name: Set chunksize for JBOD - set_fact: + ansible.builtin.set_fact: lv_chunksize: '256K' - when: disktype|upper in ['NONE', 'RAID0'] + when: disktype | upper in ['NONE', 'RAID0'] # rc 5 = Logical Volume 'name' already exists in volume group. 
- name: Create a LV thinpool - command: "lvcreate -l 100%FREE --chunksize {{ lv_chunksize }} --poolmetadatasize {{ pool_metadatasize }} --zero n --type thin-pool --thinpool {{ lvname }}_pool {{ vgname }}" + ansible.builtin.command: > + lvcreate -l 100%FREE --chunksize {{ lv_chunksize }} --poolmetadatasize {{ pool_metadatasize }} --zero n + --type thin-pool --thinpool {{ lvname }}_pool {{ vgname }} register: resp failed_when: resp.rc not in [0, 5] changed_when: resp.rc == 0 # rc 5 = Logical Volume 'name' already exists in volume group. - name: Create thin logical volume - command: "lvcreate -T {{ vgname }}/{{ lvname }}_pool -V {{ size }} -n {{ lvname }}" + ansible.builtin.command: "lvcreate -T {{ vgname }}/{{ lvname }}_pool -V {{ size }} -n {{ lvname }}" register: resp failed_when: resp.rc not in [0, 5] changed_when: resp.rc == 0 -- include_tasks: lvmcache.yml +- ansible.builtin.include_tasks: lvmcache.yml when: ssd is defined and ssd # rc 1 = Filesystem already exists - name: Create an xfs filesystem - command: "mkfs.xfs -f -K -i size=512 -n size=8192 {% if 'raid' in disktype %} -d sw={{ diskcount }},su={{ stripesize }}k {% endif %} /dev/{{ vgname }}/{{ lvname }}" + ansible.builtin.command: > + mkfs.xfs -f -K -i size=512 -n size=8192 {% if 'raid' in disktype %} -d sw={{ diskcount }},su={{ stripesize }}k + {% endif %} /dev/{{ vgname }}/{{ lvname }} register: resp failed_when: resp.rc not in [0, 1] changed_when: resp.rc == 0 - name: Create the backend directory, skips if present - file: + ansible.builtin.file: path: "{{ mntpath }}" state: directory + mode: 0755 - name: Mount the brick - mount: + ansible.posix.mount: name: "{{ mntpath }}" src: "/dev/{{ vgname }}/{{ lvname }}" fstype: "{{ fstype }}" @@ -90,6 +95,6 @@ state: mounted - name: Set SELinux labels on the bricks - command: "chcon -t glusterd_brick_t {{ mntpath }}" + ansible.builtin.command: "chcon -t glusterd_brick_t {{ mntpath }}" register: resp changed_when: resp.rc == 0 diff --git 
a/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/defaults/main.yml b/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/defaults/main.yml index b46d7d80bdc..2516f0958d7 100644 --- a/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/defaults/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/defaults/main.yml @@ -1,2 +1,2 @@ --- -# defaults file for replace_node \ No newline at end of file +# defaults file for replace_node diff --git a/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/authorization.yml b/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/authorization.yml index 19acbbef8d9..5c5888ca37e 100644 --- a/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/authorization.yml +++ b/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/authorization.yml @@ -1,10 +1,10 @@ - name: Make sure authorized_keys file is present - stat: + ansible.builtin.stat: path: "/root/.ssh/authorized_keys" register: authkey - name: Copy the authorized_keys from the active host to the new host - synchronize: + ansible.posix.synchronize: src: "/root/.ssh/authorized_keys" dest: "/root/.ssh/authorized_keys" mode: pull diff --git a/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/main.yml index f75c0a6c662..d4450af558e 100644 --- a/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/main.yml @@ -2,16 +2,16 @@ # Peer restoration # Create tmp dir for storing peer data - name: Create temporary storage directory - tempfile: + ansible.builtin.tempfile: state: directory suffix: _peer register: 
tmpdir delegate_to: localhost - run_once: True + run_once: true # Set the glusterd location - name: Set the path of glusterd.info file - set_fact: + ansible.builtin.set_fact: glusterd_libdir: "/var/lib/glusterd" peer_tmp_dir: "{{ tmpdir['path'] }}" @@ -19,16 +19,16 @@ block: - name: Include pre-task in play - import_tasks: pre-task.yml + ansible.builtin.import_tasks: pre-task.yml - name: Include authorization task in play - import_tasks: authorization.yml + ansible.builtin.import_tasks: authorization.yml - name: Include peers reconfiguration task in play - import_tasks: peers.yml + ansible.builtin.import_tasks: peers.yml - name: Include volume reconfiguration task in play - import_tasks: volume.yml + ansible.builtin.import_tasks: volume.yml when: gluster_maintenance_old_node is defined and gluster_maintenance_cluster_node is defined and @@ -36,28 +36,28 @@ # Detach the old host, to replace host with different FQDN usecase - name: Detach the peer, in the case of different host replacement - command: "gluster peer detach {{ gluster_maintenance_old_node }} force" + ansible.builtin.command: "gluster peer detach {{ gluster_maintenance_old_node }} force" when: gluster_maintenance_old_node != gluster_maintenance_new_node - name: Force removal of old node peer in new node - file: + ansible.builtin.file: path: "{{ glusterd_libdir }}/peers/{{ old_node_uuid.stdout | trim }}" state: absent when: gluster_maintenance_old_node != gluster_maintenance_new_node - delegate_to : "{{ gluster_maintenance_new_node }}" + delegate_to: "{{ gluster_maintenance_new_node }}" connection: ssh - name: Restart glusterd on the new node connection: ssh - service: + ansible.builtin.service: name: glusterd state: restarted delegate_to: "{{ gluster_new_node }}" # Ensure to delete the temporary directory - name: Delete the temporary directory - file: + ansible.builtin.file: state: absent path: "{{ peer_tmp_dir }}" delegate_to: localhost - run_once: True + run_once: true diff --git 
a/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/peers.yml b/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/peers.yml index 3ac04fc760d..6fcfc8b24a0 100644 --- a/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/peers.yml +++ b/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/peers.yml @@ -2,49 +2,46 @@ - name: Validating the state of the old host block: - name: Fetch the UUID of the old node - shell: > + ansible.builtin.shell: > gluster peer status | grep -A1 {{ gluster_old_node | mandatory }} | awk -F: '/uid/ { print $2}' register: uuid - tags: - - skip_ansible_lint # E301 - name: Get the state from peer file - shell: grep state "/var/lib/glusterd/peers/{{ uuid.stdout | trim }}" + ansible.builtin.command: + argv: + - grep + - state + - "/var/lib/glusterd/peers/{{ uuid.stdout | trim }}" register: grepres1 - tags: - - skip_ansible_lint # E301 - name: Get the state from the peer command - shell: > + ansible.builtin.shell: > gluster peer status | grep -A1 {{ uuid.stdout | trim }} | grep Connected | wc -l ignore_errors: true register: grepres2 - tags: - - skip_ansible_lint # E301 - name: Fail the play if the previous command did not succeed - fail: + ansible.builtin.fail: msg: "{{ gluster_old_node }} is already in connected state" when: grepres1.stdout == "state=3" and grepres2.stdout == "1" run_once: true - name: Get the UUID of the old node - shell: > + ansible.builtin.shell: # noqa risky-shell-pipe + > gluster peer status | grep -A1 {{ gluster_old_node | mandatory }} | awk -F: '/uid/ { print $2}' register: old_node_uuid run_once: true - tags: - - skip_ansible_lint # E301 - name: Fail if parsed hostname is different from the back-end FQDN - fail: msg="Hostname mentioned in inventory should be same with back-end gluster FQDN" + ansible.builtin.fail: msg="Hostname mentioned in inventory should be same with back-end gluster FQDN" 
when: old_node_uuid.stdout | bool tags: - skip_ansible_lint # E602 @@ -54,12 +51,12 @@ block: # probe the new host, if new host is not the same old host - name: Peer probe the new host - command: "gluster peer probe {{ gluster_new_node }}" + ansible.builtin.command: "gluster peer probe {{ gluster_new_node }}" # Workaround for Gluster Bug doesn't copy the peer file in rejected state - name: Workaround for the bug to copy peer file of reject node connection: ssh - synchronize: + ansible.posix.synchronize: src: "{{ glusterd_libdir }}/peers/{{ old_node_uuid.stdout | trim }}" dest: "{{ glusterd_libdir }}/peers/" mode: pull @@ -67,7 +64,7 @@ - name: Restart glusterd on the new node connection: ssh - service: + ansible.builtin.service: name: glusterd state: restarted delegate_to: "{{ gluster_new_node }}" @@ -77,28 +74,26 @@ - name: Peer restoration in the case of replacing with same FQDN block: - name: Get the UUID of the current node - shell: > + ansible.builtin.shell: > cat "{{ glusterd_libdir }}"/glusterd.info | awk -F= '/UUID/ { print $2}' register: current_node_uuid run_once: true - tags: - - skip_ansible_lint # E301 - name: Fail if current node UUID is empty - fail: msg="Execute this playbook on the active host" + ansible.builtin.fail: msg="Execute this playbook on the active host" when: current_node_uuid.stdout | bool tags: - skip_ansible_lint # E602 - name: Store the UUID - set_fact: + ansible.builtin.set_fact: old_uuid: "{{ old_node_uuid.stdout | trim }}" current_uuid: "{{ current_node_uuid.stdout | trim }}" - name: Collect all the peer files from cluster_node connection: ssh - synchronize: + ansible.posix.synchronize: src: "{{ glusterd_libdir }}/peers/" dest: "{{ peer_tmp_dir }}/" mode: pull @@ -106,47 +101,48 @@ - name: Check whether peer file of cluster_node is available connection: ssh - stat: + ansible.builtin.stat: path: "{{ glusterd_libdir }}/peers/{{ current_uuid }}" register: peerfileres delegate_to: "{{ gluster_cluster_node_2 | mandatory }}" - name: 
Fetch the peer file of the current node connection: ssh - fetch: + ansible.builtin.fetch: src: "{{ glusterd_libdir }}/peers/{{ current_uuid }}" dest: "{{ peer_tmp_dir }}/" - flat: yes + flat: true delegate_to: "{{ gluster_cluster_node_2 | mandatory }}" when: peerfileres.stat.isreg is defined - name: Fetch the peer file of the current node connection: ssh - fetch: + ansible.builtin.fetch: src: "{{ glusterd_libdir }}/peers/{{ current_uuid }}" dest: "{{ peer_tmp_dir }}/" - flat: yes + flat: true delegate_to: "{{ gluster_cluster_node | mandatory }}" when: peerfileres.stat.isreg is not defined - name: Remove the old node uuid from the extracted peer details connection: ssh - file: + ansible.builtin.file: path: "{{ peer_tmp_dir }}/{{ old_uuid }}" state: absent delegate_to: "{{ gluster_cluster_node }}" - name: Copy all the peer files to the new host connection: ssh - copy: + ansible.builtin.copy: src: "{{ peer_tmp_dir }}/" dest: "{{ glusterd_libdir }}/peers/" + mode: 0755 delegate_to: "{{ gluster_new_node }}" - name: Edit the new node's glusterd.info connection: ssh - lineinfile: + ansible.builtin.lineinfile: path: "{{ glusterd_libdir }}/glusterd.info" regexp: '^UUID=' line: "UUID={{ old_uuid }}" @@ -154,12 +150,12 @@ - name: Restart glusterd connection: ssh - service: + ansible.builtin.service: name: glusterd state: restarted delegate_to: "{{ gluster_new_node | mandatory }}" - name: Pausing for 5 seconds - pause: seconds=5 + ansible.builtin.pause: seconds=5 when: gluster_old_node == gluster_new_node diff --git a/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/pre-task.yml b/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/pre-task.yml index 84161a0b004..8b1fe6c1b92 100644 --- a/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/pre-task.yml +++ b/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/pre-task.yml @@ -1,63 +1,67 @@ -#check if 
there exist more than 1 hostname in the gluster peer status if not, it skips the pre-task -#if parsed input is ip itself and there exist only 1 hostname in gluster peer status , then we can safely assume that gluster is configured with gluster ip address -# [1]Not Needed: check if that is ip or not, if not ip then copy the playbook hostname directly to further tasks -# if the parsed input is hostname, and there exist only 1 hostname in the gluster peer status , then we can assume that gluster is configured with hostnames itself -#[1]Not Needed :if its ip then replace the hostname with ip and parse it as well (more or less the same as above) -#[1]no need of those two steps as it already would be set in the playbook. -#check if old node nad new node are same , if its same then parse the same name while storing the playbook else leave it +# Check if there exist more than 1 hostname in the gluster peer status if not, it skips the pre-task +# If parsed input is ip itself and there exist only 1 hostname in gluster peer status , then we can safely assume that +# gluster is configured with gluster ip address +# [1] Not Needed: check if that is ip or not, if not ip then copy the playbook hostname directly to further tasks +# If the parsed input is hostname, and there exist only 1 hostname in the gluster peer status , then we can assume that +# gluster is configured with hostnames itself +# [1] Not Needed :if its ip then replace the hostname with ip and parse it as well (more or less the same as above) +# [1] No need of those two steps as it already would be set in the playbook. 
+# Check if old node nad new node are same , if its same then parse the same name while storing the playbook else leave it --- - name: Setting the hostname's from playbook - set_fact: + ansible.builtin.set_fact: gluster_old_node: "{{ gluster_maintenance_old_node }}" gluster_new_node: "{{ gluster_maintenance_new_node }}" gluster_cluster_node: "{{ gluster_maintenance_cluster_node }}" gluster_cluster_node_2: "{{ gluster_maintenance_cluster_node_2 }}" - name: Get the number of hosts in the cluster - shell: > + ansible.builtin.shell: > + set -o pipefail && \ cat /var/lib/glusterd/peers/* | grep -c uuid + args: + executable: /bin/bash register: host_count - tags: - - skip_ansible_lint # E301 - name: Get the number of hostname in the peer output - shell: > + ansible.builtin.shell: > + set -o pipefail && \ cat /var/lib/glusterd/peers/* | grep -c hostname + args: + executable: /bin/bash register: hostname_count - tags: - - skip_ansible_lint # E301 - name: Check if Parsed hosts are Ipv4 or not - shell: > + ansible.builtin.shell: > + set -o pipefail && \ echo "{{ gluster_maintenance_cluster_node }}" | grep -E -q "([0-9]{1,3}[\.]){3}[0-9]{1,3}" && echo true || echo false + args: + executable: /bin/bash register: isipv4 - tags: - - skip_ansible_lint # E301 - name: Check if Parsed hosts are IPv6 or not - shell : > + ansible.builtin.shell: > + set -o pipefail && \ echo "fe80::24c0:20ff:fec2:8f6c" | grep -Po -q \ '(? 
+ ansible.builtin.shell: > cat /var/lib/glusterd/peers/* | grep -A1 -B1 {{ gluster_old_node | mandatory }} | grep -v {{ gluster_old_node }} | grep hostname | sed -n -e 's/^.*'='//p' register: old_node_temp - tags: - - skip_ansible_lint # E301 - name: Fetch the maintenance host's hostname from the ip - shell: > + ansible.builtin.shell: > cat /var/lib/glusterd/peers/* | grep -A1 -B1 {{ gluster_cluster_node | mandatory }} | grep -v {{ gluster_cluster_node }} | grep hostname | sed -n -e 's/^.*'='//p' @@ -68,28 +72,24 @@ - skip_ansible_lint # E301 - name: Fetch the Cluster maintenance host's hostname from the ip - shell: > + ansible.builtin.shell: > cat /var/lib/glusterd/peers/* | grep -A1 -B1 {{ gluster_cluster_node_2 | mandatory }} | grep -v {{ gluster_cluster_node_2 }} | grep hostname | sed -n -e 's/^.*'='//p' register: cluster_node_2_temp delegate_to: "{{ gluster_cluster_node | mandatory }}" connection: ssh - tags: - - skip_ansible_lint # E301 - name: Store the HostName's - set_fact: - gluster_old_node: "{{ old_node_temp.stdout | trim }}" - gluster_cluster_node: "{{ cluster_node_temp.stdout| trim }}" - gluster_cluster_node_2: "{{ cluster_node_2_temp.stdout | trim }}" - cacheable: yes + ansible.builtin.set_fact: + gluster_old_node: "{{ old_node_temp.stdout | trim }}" + gluster_cluster_node: "{{ cluster_node_temp.stdout | trim }}" + gluster_cluster_node_2: "{{ cluster_node_2_temp.stdout | trim }}" + cacheable: true - name: Store the new Hostname if its same as old - set_fact: + ansible.builtin.set_fact: gluster_new_node: "{{ old_node_temp.stdout | trim }}" when: gluster_maintenance_old_node == gluster_maintenance_new_node - when: host_count.stdout != hostname_count.stdout and isipv4.stdout == "true" or isipv6.stdout == "true" - tags: - - skip_ansible_lint # E301 + when: host_count.stdout != hostname_count.stdout and isipv4.stdout == "true" or isipv6.stdout == "true" diff --git 
a/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/volume.yml b/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/volume.yml index 80428323291..6fa46f6f260 100644 --- a/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/volume.yml +++ b/packaging/ansible-runner-service-project/project/roles/gluster-replace-peers/tasks/volume.yml @@ -3,43 +3,38 @@ - name: Fetch the directory and volume details block: - name: Get the list of volumes on the machine - shell: ls "{{ glusterd_libdir }}/vols" + ansible.builtin.command: + argv: + - ls + - "{{ glusterd_libdir }}/vols" register: dir_list - tags: - - skip_ansible_lint # E301 - name: Setting Facts for Volume - set_fact: + ansible.builtin.set_fact: volumes: "{{ dir_list.stdout.split() }}" # Find the list of bricks on the machine - name: Get the list of bricks corresponding to volume - shell: > + ansible.builtin.shell: > gluster vol info {{ item }} | grep {{ gluster_maintenance_old_node }} | cut -d' ' -f2 | awk -F: '{ print $2 }' with_items: "{{ volumes }}" register: brick_list - tags: - - skip_ansible_lint # E301 delegate_to: "{{ gluster_cluster_node }}" connection: ssh - tags: - - skip_ansible_lint # E301 - name: Run replace-brick commit on the brick connection: ssh - shell: > + ansible.builtin.shell: > [ ! 
-d {{ item.1 }} ] && gluster volume replace-brick {{ item.0.item }} {{ gluster_old_node }}:{{ item.1 }} {{ gluster_new_node }}:{{ item.1 }} commit force register: ret - failed_when: ret.rc >=2 + failed_when: ret.rc >= 2 with_subelements: - "{{ brick_list.results }}" - stdout_lines delegate_to: "{{ gluster_new_node }}" - tags: - - skip_ansible_lint # E301 diff --git a/packaging/ansible-runner-service-project/project/roles/hc-gluster-cgroups/handlers/main.yml b/packaging/ansible-runner-service-project/project/roles/hc-gluster-cgroups/handlers/main.yml index ce9fa1619ea..22de3593905 100644 --- a/packaging/ansible-runner-service-project/project/roles/hc-gluster-cgroups/handlers/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/hc-gluster-cgroups/handlers/main.yml @@ -1,5 +1,4 @@ - name: Restart glusterd - service: + ansible.builtin.service: name: glusterd state: restarted - diff --git a/packaging/ansible-runner-service-project/project/roles/hc-gluster-cgroups/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/hc-gluster-cgroups/tasks/main.yml index df4a24deb53..de1ce1af858 100644 --- a/packaging/ansible-runner-service-project/project/roles/hc-gluster-cgroups/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/hc-gluster-cgroups/tasks/main.yml @@ -1,26 +1,27 @@ --- - name: Create systemd overrides directory for glusterd - file: + ansible.builtin.file: path: /etc/systemd/system/glusterd.service.d/ state: directory + mode: 0755 - name: Create glusterd cgroups CPU configuration file - copy: + ansible.builtin.copy: src: 99-cpu.conf dest: /etc/systemd/system/glusterd.service.d/99-cpu.conf - mode: preserve + mode: 0644 notify: - Restart glusterd - name: Set CPU quota - template: + ansible.builtin.template: src: glusterfs.slice.j2 dest: /etc/systemd/system/glusterfs.slice + mode: 0644 vars: - gluster_cgroup_cpu_quota : "{{ [(ansible_processor_vcpus/3)|int,1]|max * 100 }}" + gluster_cgroup_cpu_quota: "{{ 
[(ansible_processor_vcpus / 3) | int, 1] | max * 100 }}" notify: - Restart glusterd - name: Disable unrelated gluster hooks - script: disable-gluster-hooks.sh - + ansible.builtin.script: disable-gluster-hooks.sh diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-check-upgrade/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-check-upgrade/tasks/main.yml index c70f0b12ffc..5489add12e9 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-check-upgrade/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-check-upgrade/tasks/main.yml @@ -1,42 +1,47 @@ --- - name: Check for system updates (remove empty lines) on EL7 - shell: yum check-update -q --exclude=ansible | grep '^[a-zA-Z0-9\_\-]\+[.][a-zA-Z0-9\_\-]\+' | cut -d ' ' -f1 | sed '/^$/d' >> /tmp/yum_updates + ansible.builtin.shell: # noqa risky-shell-pipe + > + yum check-update -q --exclude=ansible | grep '^[a-zA-Z0-9\_\-]\+[.][a-zA-Z0-9\_\-]\+' | cut -d ' ' -f1 | + sed '/^$/d' >> /tmp/yum_updates register: yum_updates_res - when: el_ver|int == 7 + when: el_ver | int == 7 failed_when: "'rc' in yum_updates_res and yum_updates_res.rc == 1" tags: - - updatecheck - - skip_ansible_lint # E305 + - updatecheck + - skip_ansible_lint # E305 # Grep for packages of the following form ensures package name starts at the beginning of a line, contains only # valid package name characters, and is made of a valid package name form # For example, Packages that will be found: redhat-virtualization-host-image-update.noarch, tar-1.32-6.fc33.x86_64 # NetworkManager.x86_64, cockpit.x86_64 - name: Check for system updates (remove empty lines) - shell: yum check-update -q | grep '^[a-zA-Z0-9\_\-]\+[.][a-zA-Z0-9\_\-]\+' | cut -d ' ' -f1 | sed '/^$/d' >> /tmp/yum_updates + ansible.builtin.shell: # noqa risky-shell-pipe + > + yum check-update -q | grep '^[a-zA-Z0-9\_\-]\+[.][a-zA-Z0-9\_\-]\+' | cut -d ' ' -f1 | sed '/^$/d' >> 
/tmp/yum_updates register: yum_updates_res - when: el_ver|int != 7 + when: el_ver | int != 7 failed_when: "'rc' in yum_updates_res and yum_updates_res.rc == 1" tags: - - updatecheck - - skip_ansible_lint # E305 + - updatecheck + - skip_ansible_lint # E305 - name: Remove 'Obsoleting' title from file - lineinfile: + ansible.builtin.lineinfile: path: /tmp/yum_updates line: Obsoleting state: absent - name: Get yum updates file - slurp: + ansible.builtin.slurp: src: /tmp/yum_updates register: yum_updates - name: Get yum updates content - set_fact: + ansible.builtin.set_fact: yum_result: "{{ yum_updates['content'] | b64decode }}" - name: Delete yum_updates file from host - file: + ansible.builtin.file: path: /tmp/yum_updates state: absent diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/defaults/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/defaults/main.yml index 637042ffd3c..a7325a867b8 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/defaults/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/defaults/main.yml @@ -1,2 +1,2 @@ --- -ovirt_origin_type: 'OVIRT' \ No newline at end of file +ovirt_origin_type: 'OVIRT' diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/tasks/av-setup.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/tasks/av-setup.yml index 007cfd334b7..7a8525ccffb 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/tasks/av-setup.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/tasks/av-setup.yml @@ -1,18 +1,16 @@ --- - block: - name: Reset configuration of advanced virtualization module - shell: dnf -y module reset virt - tags: - - skip_ansible_lint # E305 + ansible.builtin.command: dnf -y module reset virt - name: Set virt module 
version for RHEL 8.6 and newer - set_fact: + ansible.builtin.set_fact: virt_mod_ver: "rhel" when: - ansible_distribution_version is version('8.6', '>=') - name: Set virt module version for RHEL 8.4 and 8.5 - set_fact: + ansible.builtin.set_fact: virt_mod_ver: "av" when: - virt_mod_ver is not defined @@ -20,16 +18,20 @@ - ansible_distribution_version is version('8.5', '<=') - name: Set virt module version for RHEL 8.3 and older - set_fact: + ansible.builtin.set_fact: virt_mod_ver: "{{ ansible_distribution_version }}" when: - virt_mod_ver is not defined - ansible_distribution_version is version('8.3', '<=') - name: Enable advanced virtualization module - shell: "dnf -y module enable virt:{{ virt_mod_ver }}" - tags: - - skip_ansible_lint # E305 + ansible.builtin.command: + argv: + - dnf + - -y + - module + - enable + - "virt:{{ virt_mod_ver }}" when: - host_deploy_origin_type != ovirt_origin_type diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/tasks/cinderlib-setup.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/tasks/cinderlib-setup.yml index 93aa76140e8..96299ccb195 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/tasks/cinderlib-setup.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/tasks/cinderlib-setup.yml @@ -1,46 +1,42 @@ --- - block: - name: Check if Cinderlib packages are available - package: + ansible.builtin.package: name: python3-os-brick state: present - check_mode: yes + check_mode: true ignore_errors: true register: cinderlib_check - name: Enable Cinderlib repository - shell: "subscription-manager repos --enable=openstack-16.2-cinderlib-for-rhel-8-x86_64-rpms" + ansible.builtin.command: "subscription-manager repos --enable=openstack-16.2-cinderlib-for-rhel-8-x86_64-rpms" when: cinderlib_check is failed - ignore_errors: True + ignore_errors: true register: cinderlib_repo_install - tags: - - 
skip_ansible_lint # E305 - name: Check for Cinderlib repository configuration error - debug: + ansible.builtin.debug: msg: "[ERROR] Cannot enable Cinderlib repositories, please check documentation how to enable them manually" when: - cinderlib_check is failed - cinderlib_repo_install.rc != 0 - name: Check if Ceph packages are available - package: + ansible.builtin.package: name: ceph-common state: present - check_mode: yes + check_mode: true ignore_errors: true register: ceph_check - name: Enable Ceph repository - shell: "subscription-manager repos --enable=rhceph-4-tools-for-rhel-8-x86_64-rpms" + ansible.builtin.command: "subscription-manager repos --enable=rhceph-4-tools-for-rhel-8-x86_64-rpms" when: ceph_check is failed - ignore_errors: True + ignore_errors: true register: ceph_repo_install - tags: - - skip_ansible_lint # E305 - name: Check for Ceph repository configuration error - debug: + ansible.builtin.debug: msg: "[ERROR] Cannot enable Ceph repositories, please check documentation how to enable them manually" when: - ceph_check is failed diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/tasks/host-os.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/tasks/host-os.yml index ef04ae8ea85..df65d9e51b3 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/tasks/host-os.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/tasks/host-os.yml @@ -1,44 +1,44 @@ --- - name: Detect host operating system - set_fact: - el_ver: "{{ ansible_distribution_major_version|int + ansible.builtin.set_fact: + el_ver: "{{ ansible_distribution_major_version | int if ansible_os_family == 'RedHat' else 0 }}" - fc_ver: "{{ ansible_distribution_major_version|int + fc_ver: "{{ ansible_distribution_major_version | int if ansible_distribution == 'Fedora' else 0 }}" - name: Fetch installed packages - package_facts: + 
ansible.builtin.package_facts: manager: rpm - name: Check if vdsm is preinstalled - set_fact: + ansible.builtin.set_fact: vdsm_preinstalled: "{{ 'vdsm' in ansible_facts.packages }}" - name: Parse operating system release - lineinfile: + ansible.builtin.lineinfile: path: /etc/os-release line: 'VARIANT_ID="ovirt-node"' - check_mode: yes + check_mode: true register: node_presence - name: Detect if host is a prebuilt image - set_fact: + ansible.builtin.set_fact: node_host: "{{ not node_presence.changed }}" - block: - - name: Check version of ovirt-openvswitch before upgrade - set_fact: - ovirt_openvswitch_pre: "{{ ansible_facts.packages['ovirt-openvswitch'] | first }}" - when: - - ansible_facts.packages['ovirt-openvswitch'] is defined + - name: Check version of ovirt-openvswitch before upgrade + ansible.builtin.set_fact: + ovirt_openvswitch_pre: "{{ ansible_facts.packages['ovirt-openvswitch'] | first }}" + when: + - ansible_facts.packages['ovirt-openvswitch'] is defined - - name: Check version of rhv-openvswitch before upgrade - set_fact: - ovirt_openvswitch_pre: "{{ ansible_facts.packages['rhv-openvswitch'] | first }}" - when: - - ansible_facts.packages['rhv-openvswitch'] is defined + - name: Check version of rhv-openvswitch before upgrade + ansible.builtin.set_fact: + ovirt_openvswitch_pre: "{{ ansible_facts.packages['rhv-openvswitch'] | first }}" + when: + - ansible_facts.packages['rhv-openvswitch'] is defined when: - check_ovs_version is defined diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/tasks/main.yml index 6de2c6d6ad1..59086dd9eda 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-facts/tasks/main.yml @@ -1,18 +1,18 @@ --- -- include_tasks: "{{ item }}.yml" +- 
ansible.builtin.include_tasks: "{{ item }}.yml" with_items: - host-os - av-setup - cinderlib-setup - name: Ensure Python3 is installed for CentOS/RHEL8 hosts - yum: + ansible.builtin.yum: name: python3 state: present - when: el_ver|int >= 8 + when: el_ver | int >= 8 - name: Setup Ansible Python interpreter - set_fact: - ansible_python_interpreter : "{{ '/usr/bin/python3' - if (fc_ver|int > 0 or el_ver|int >= 8) + ansible.builtin.set_fact: + ansible_python_interpreter: "{{ '/usr/bin/python3' + if (fc_ver | int > 0 or el_ver | int >= 8) else '/usr/bin/python2' }}" diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-firewalld/tasks/firewalld.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-firewalld/tasks/firewalld.yml index b92cea90812..2f7816fa749 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-firewalld/tasks/firewalld.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-firewalld/tasks/firewalld.yml @@ -1,25 +1,25 @@ - name: Check for firewalld rules file - stat: + ansible.builtin.stat: path: "{{ role_path }}/vars/{{ firewald_rules_file }}" register: firewalld_rule_file_stat delegate_to: localhost - block: - - name: Include firewalld rules - include_vars: - file: "{{ firewald_rules_file }}" - name: cluster_vars - register: include_cluster_vars + - name: Include firewalld rules + ansible.builtin.include_vars: + file: "{{ firewald_rules_file }}" + name: cluster_vars + register: include_cluster_vars - # We need to ignore errors in case the service doesn't exist. 
- - name: Enable firewalld rules - firewalld: - port: "{{ item.port | default(omit) }}" - service: "{{ item.service | default(omit) }}" - permanent: yes - immediate: yes - state: enabled - with_items: "{{ cluster_vars.host_deploy_firewalld_rules | default([]) }}" - when: "cluster_vars is defined and not include_cluster_vars.failed | default(false)" + # We need to ignore errors in case the service doesn't exist. + - name: Enable firewalld rules + ansible.builtin.firewalld: + port: "{{ item.port | default(omit) }}" + service: "{{ item.service | default(omit) }}" + permanent: true + immediate: true + state: enabled + with_items: "{{ cluster_vars.host_deploy_firewalld_rules | default([]) }}" + when: "cluster_vars is defined and not include_cluster_vars.failed | default(false)" when: firewalld_rule_file_stat.stat.exists diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-firewalld/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-firewalld/tasks/main.yml index 990f77e9e48..b24197515c7 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-firewalld/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-firewalld/tasks/main.yml @@ -1,36 +1,36 @@ --- - block: - name: Stop iptables service if running - service: + ansible.builtin.service: # noqa ignore-errors name: iptables state: stopped - enabled: no - ignore_errors: yes + enabled: false + ignore_errors: true - name: Check if firewalld is installed - package: + ansible.builtin.package: name: firewalld state: present - name: Check if firewalld is runnning - service: + ansible.builtin.service: name: firewalld state: started - enabled: yes + enabled: true - name: Enable SSH port - firewalld: + ansible.builtin.firewalld: port: "{{ ansible_port }}/tcp" - permanent: yes - immediate: yes + permanent: true + immediate: true state: enabled - name: Apply default firewalld rules - 
include_tasks: firewalld.yml + ansible.builtin.include_tasks: firewalld.yml vars: firewald_rules_file: firewalld_rules_{{ outer_item.0 }}.yml - when: outer_item.1|bool + when: outer_item.1 | bool with_together: - ["common", "virt", "gluster"] - [true, "{{ host_deploy_virt_enabled }}", "{{ host_deploy_gluster_enabled }}"] @@ -38,16 +38,16 @@ loop_var: outer_item - name: Apply cluster specific firewalld rules - include_tasks: firewalld.yml + ansible.builtin.include_tasks: firewalld.yml vars: firewald_rules_file: firewalld_rules_{{ outer_item.0 }}_{{ host_deploy_cluster_version }}.yml - when: outer_item.1|bool + when: outer_item.1 | bool with_together: - ["common", "virt", "gluster"] - [true, "{{ host_deploy_virt_enabled }}", "{{ host_deploy_gluster_enabled }}"] loop_control: loop_var: outer_item when: - - host_deploy_override_firewall|bool + - host_deploy_override_firewall | bool - host_deploy_firewall_type == 'FIREWALLD' diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-hosted-engine/tasks/deploy.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-hosted-engine/tasks/deploy.yml index 0261b53f0d4..f0111b3177c 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-hosted-engine/tasks/deploy.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-hosted-engine/tasks/deploy.yml @@ -1,30 +1,28 @@ --- - name: Verify platform is suitable for hosted engine - fail: + ansible.builtin.fail: msg: "Hosted Engine support requested on unsupported machine: {{ ansible_facts.architecture }}. 
Disabling" when: ansible_facts.architecture != 'x86_64' -- name: populate service facts - service_facts: +- name: Populate service facts + ansible.builtin.service_facts: - name: Stop services - service: + ansible.builtin.service: name: "{{ item }}" state: stopped loop: - - ovirt-ha-agent.service - - ovirt-ha-broker.service + - ovirt-ha-agent.service + - ovirt-ha-broker.service when: "item in ansible_facts.services" - name: Install ovirt-hosted-engine-setup package - yum: + ansible.builtin.yum: name: ovirt-hosted-engine-setup state: present - tags: - - skip_ansible_lint # E403 - name: Store hosted engine configuration - copy: + ansible.builtin.copy: src: "{{ hosted_engine_tmp_cfg_file }}" dest: "{{ hosted_engine_conf }}" owner: 'vdsm' @@ -32,13 +30,13 @@ mode: 0440 - name: Update host ID in hosted engine configuration - lineinfile: + ansible.builtin.lineinfile: path: "{{ hosted_engine_conf }}" regex: "^host_id=" line: "host_id={{ hosted_engine_host_id }}" - name: Start ovirt-ha-agent service - service: + ansible.builtin.service: name: ovirt-ha-agent - enabled: yes + enabled: true state: restarted diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-hosted-engine/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-hosted-engine/tasks/main.yml index 6136c4c9391..a7713300929 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-hosted-engine/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-hosted-engine/tasks/main.yml @@ -1,8 +1,8 @@ --- - name: Include remove hosted engine tasks - include_tasks: remove.yml + ansible.builtin.include_tasks: remove.yml when: hosted_engine_deploy_action == 'undeploy' - name: Include deploy hosted engine tasks - include_tasks: deploy.yml + ansible.builtin.include_tasks: deploy.yml when: hosted_engine_deploy_action == 'deploy' diff --git 
a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-hosted-engine/tasks/remove.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-hosted-engine/tasks/remove.yml index d4b7552185e..46c6e4d2885 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-hosted-engine/tasks/remove.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-hosted-engine/tasks/remove.yml @@ -1,10 +1,10 @@ --- - name: Remove hosted engine conf file - file: + ansible.builtin.file: path: "{{ hosted_engine_conf }}" state: absent - name: Stop ovirt-ha-agent service - service: + ansible.builtin.service: name: ovirt-ha-agent state: stopped diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-iptables/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-iptables/tasks/main.yml index 43368ee0170..bf732623eda 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-iptables/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-iptables/tasks/main.yml @@ -1,42 +1,40 @@ --- - block: - name: Fail host deploy if firewall type is iptables for hosts other than CentOS 7/ RHEL 7 - fail: - when: el_ver|int != 7 + ansible.builtin.fail: + when: el_ver | int != 7 - name: Get latest iptables-services package - yum: + ansible.builtin.yum: name: iptables-services state: present - name: Store iptables - copy: + ansible.builtin.copy: # noqa template-instead-of-copy content: "{{ host_deploy_iptables_rules }}" dest: "{{ redhat_iptables }}" owner: 'root' mode: 0600 - remote_src: yes + remote_src: true - name: Populate service facts - service_facts: + ansible.builtin.service_facts: - block: # Disabling firewalld to avoid conflict - - name: Stop and disable firewalld - service: + - name: Stop, disable and mask firewalld + ansible.builtin.systemd: name: firewalld enabled: false 
state: stopped - - - name: Mask firewalld - shell: systemctl mask firewalld + masked: true when: "'firewalld.service' in ansible_facts.services" - name: Restart and enable iptables - service: + ansible.builtin.service: name: iptables enabled: true state: restarted when: - - host_deploy_override_firewall|bool - - host_deploy_firewall_type == 'IPTABLES' + - host_deploy_override_firewall | bool + - host_deploy_firewall_type == 'IPTABLES' diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-kdump/defaults/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-kdump/defaults/main.yml index 26b53e496f0..8abafeaedc0 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-kdump/defaults/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-kdump/defaults/main.yml @@ -1,3 +1,3 @@ --- kdump_config_file: '/etc/kdump.conf' -crashkernel_regexp: crashkernel= \ No newline at end of file +crashkernel_regexp: crashkernel= diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-kdump/handlers/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-kdump/handlers/main.yml index 620f62f536e..ae13354b3d5 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-kdump/handlers/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-kdump/handlers/main.yml @@ -1,5 +1,5 @@ --- - name: Restart kdump - service: + ansible.builtin.service: name: kdump state: restarted diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-kdump/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-kdump/tasks/main.yml index b218e546f0d..5eece203e80 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-kdump/tasks/main.yml +++ 
b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-kdump/tasks/main.yml @@ -1,34 +1,34 @@ --- - name: Verify if crashkernel parameter is set - lineinfile: + ansible.builtin.lineinfile: dest: /proc/cmdline regexp: crashkernel_regexp - check_mode: yes + check_mode: true register: crashkernel_param failed_when: crashkernel_param.changed - name: Install kexec-tools package - yum: + ansible.builtin.yum: name: kexec-tools state: present - name: Fetch packages information - package_facts: + ansible.builtin.package_facts: manager: rpm - name: Verify if kexec-tools package version is supported - fail: + ansible.builtin.fail: msg: "kexec-tools version 2.0.15 or higher is required" when: ansible_facts.packages['kexec-tools'][0].version < "2.0.15" - name: Backup existing fence_kdump configuration - replace: + ansible.builtin.replace: dest: "{{ kdump_config_file }}" regexp: '(^fence_kdump.*$)' replace: '# \1' - name: Configure fence_kdump intergration - blockinfile: + ansible.builtin.blockinfile: dest: "{{ kdump_config_file }}" marker: "" block: | diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-kernel/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-kernel/tasks/main.yml index 82f659ae6f0..a1774f3bc51 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-kernel/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-kernel/tasks/main.yml @@ -1,37 +1,39 @@ --- - name: Verify that grubby package is installed - yum: + ansible.builtin.yum: name: grubby state: present - name: Detect if boot parameter is set when FIPS support is enabled - set_fact: + ansible.builtin.set_fact: add_boot_param: "{{ host_deploy_kernel_cmdline_new | regex_search('fips\\s*=\\s*1') and not host_deploy_kernel_cmdline_new | regex_search('boot\\s*=') }}" when: host_deploy_kernel_cmdline_new is not none - name: Fetch boot drive UUID - shell: 
findmnt --output=UUID --noheadings --target=/boot + ansible.builtin.command: findmnt --output=UUID --noheadings --target=/boot register: uuid - when: add_boot_param is defined and add_boot_param|bool + when: add_boot_param is defined and add_boot_param | bool failed_when: not uuid.stdout - tags: - - skip_ansible_lint # E305 - name: Add boot parameter to kernel parameters - set_fact: + ansible.builtin.set_fact: # noqa no-handler host_deploy_kernel_cmdline_new: "boot=UUID={{ uuid.stdout }} {{ host_deploy_kernel_cmdline_new }}" - when: uuid.changed|bool - tags: - - skip_ansible_lint # E503 + when: uuid.changed | bool - name: Removing old kernel arguments - shell: "grubby --update-kernel=ALL --remove-args '{{ host_deploy_kernel_cmdline_old }}'" + ansible.builtin.command: + argv: + - grubby + - --update-kernel=ALL + - --remove-args + - "'{{ host_deploy_kernel_cmdline_old }}'" when: host_deploy_kernel_cmdline_old is defined and host_deploy_kernel_cmdline_old - tags: - - skip_ansible_lint # E305 - name: Adding new kernel arguments - shell: "grubby --update-kernel=ALL --args '{{ host_deploy_kernel_cmdline_new }}'" + ansible.builtin.command: + argv: + - grubby + - --update-kernel=ALL + - --args + - "'{{ host_deploy_kernel_cmdline_new }}'" when: host_deploy_kernel_cmdline_new is defined and host_deploy_kernel_cmdline_new - tags: - - skip_ansible_lint # E305 diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-libvirt-guests/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-libvirt-guests/tasks/main.yml index 1431e6976df..083c3747c84 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-libvirt-guests/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-libvirt-guests/tasks/main.yml @@ -1,12 +1,13 @@ --- - name: Setup the configuration file - template: + ansible.builtin.template: src: libvirt-guests.j2 dest: 
/etc/sysconfig/libvirt-guests - backup: yes + backup: true + mode: 0644 - name: Start libvirt-guests service and ensure its enabled - service: + ansible.builtin.service: name: libvirt-guests state: started - enabled: yes + enabled: true diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-misc/tasks/clock.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-misc/tasks/clock.yml index 916013b0b6b..c9007584314 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-misc/tasks/clock.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-misc/tasks/clock.yml @@ -1,12 +1,10 @@ --- - name: Start chrony daemon - service: + ansible.builtin.service: name: chronyd state: started - name: Wait for chronyd to synchronise - shell: chronyc waitsync 1 + ansible.builtin.command: chronyc waitsync 1 register: chronyc_sync failed_when: "'FAILED' in chronyc_sync.stderr" - tags: - - skip_ansible_lint # E305 diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-misc/tasks/gluster.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-misc/tasks/gluster.yml index 24aee8f1970..dde9c3e55cb 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-misc/tasks/gluster.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-misc/tasks/gluster.yml @@ -1,25 +1,23 @@ --- - name: Fetch packages information - package_facts: + ansible.builtin.package_facts: manager: rpm - block: - name: Install vdsm-gluster package - yum: + ansible.builtin.yum: name: vdsm-gluster state: present - tags: - - skip_ansible_lint # E403 - name: Restart gluster - service: + ansible.builtin.service: name: glusterd state: started - enabled: yes + enabled: true - name: Restart glustereventsd - service: + ansible.builtin.service: name: glustereventsd state: started - enabled: yes + enabled: true when: 
"'vdsm-gluster' in ansible_facts.packages" diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-misc/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-misc/tasks/main.yml index fc18d5bea32..fb0759c15e5 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-misc/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-misc/tasks/main.yml @@ -1,11 +1,11 @@ --- - name: Include sync. clock tasks - include_tasks: clock.yml + ansible.builtin.include_tasks: clock.yml - name: Include gluster tasks - include_tasks: gluster.yml + ansible.builtin.include_tasks: gluster.yml when: host_deploy_gluster_supported|bool - name: Include tuned profile tasks - include_tasks: tuned_profile.yml + ansible.builtin.include_tasks: tuned_profile.yml when: not (host_deploy_tuned_profile == 'null' and not host_deploy_virt_enabled|bool) diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-misc/tasks/tuned_profile.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-misc/tasks/tuned_profile.yml index 9d5df3ad2af..8f673fc4c2f 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-misc/tasks/tuned_profile.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-misc/tasks/tuned_profile.yml @@ -1,22 +1,24 @@ --- - name: Get lastest tune package - yum: + ansible.builtin.yum: name: tuned state: present - name: Start tune service - service: + ansible.builtin.service: name: tuned state: started - name: Set tune profile - shell: "tuned-adm profile {{ host_deploy_tuned_profile }}" + ansible.builtin.command: + argv: + - tuned-adm + - profile + - "{{ host_deploy_tuned_profile }}" register: tune_profile_set - tags: - - skip_ansible_lint # E305 - name: Enable tune service - service: + ansible.builtin.service: name: tuned - enabled: yes + enabled: true when: 
tune_profile_set.rc == 0 diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-ovn-certificates/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-ovn-certificates/tasks/main.yml index 1807de88d05..817a612cd3b 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-ovn-certificates/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-ovn-certificates/tasks/main.yml @@ -1,12 +1,12 @@ - block: - name: Create OVN key temporary files - tempfile: + ansible.builtin.tempfile: state: file suffix: ovn register: ovnkey - name: Generate OVN CSR - command: | + ansible.builtin.command: | '/usr/bin/openssl' 'req' '-new' @@ -20,14 +20,14 @@ register: csr - name: Copy OVN CSR - copy: + ansible.builtin.copy: # noqa template-instead-of-copy content: "{{ csr.stdout }}" dest: "{{ ovirt_pki_dir }}/requests/{{ ovirt_vds_hostname }}-ovn.req" mode: 0644 delegate_to: localhost - name: Run PKI enroll request for OVN - command: | + ansible.builtin.command: | "{{ ovirt_engine_usr }}/bin/pki-enroll-request.sh" "--name={{ ovirt_vds_hostname }}-ovn" "--subject=/O={{ ovirt_organizationname }}/CN={{ ovirt_vds_hostname }}" @@ -37,14 +37,15 @@ delegate_to: localhost - name: Prepare directory for OVN certificate files - file: + ansible.builtin.file: dest: "{{ ovirt_vdsm_trust_store ~ '/' ~ ovirt_ovn_cert_file | dirname }}" state: directory owner: 'openvswitch' group: 'openvswitch' + mode: 0755 - name: Add OVN cacert file - copy: + ansible.builtin.copy: # noqa template-instead-of-copy content: "{{ ovirt_ca_cert }}" dest: "{{ ovirt_vdsm_trust_store }}/{{ ovirt_ovn_ca_file }}" owner: 'openvswitch' @@ -52,28 +53,28 @@ mode: 0644 - name: Add OVN cert file - copy: + ansible.builtin.copy: src: "{{ ovirt_pki_dir }}/certs/{{ ovirt_vds_hostname }}-ovn.cer" dest: "{{ ovirt_vdsm_trust_store }}/{{ ovirt_ovn_cert_file }}" owner: 'openvswitch' group: 'openvswitch' - 
remote_src: no + remote_src: false mode: preserve - name: Add OVN key file - copy: + ansible.builtin.copy: src: "{{ ovnkey.path }}" dest: "{{ ovirt_vdsm_trust_store }}/{{ ovirt_ovn_key_file }}" owner: 'openvswitch' group: 'openvswitch' mode: 0440 - remote_src: yes + remote_src: true - name: Populate service facts - service_facts: + ansible.builtin.service_facts: - name: Restart OVN services - service: + ansible.builtin.service: name: "{{ item }}" state: restarted loop: @@ -84,6 +85,6 @@ always: - name: Remove temp file - file: + ansible.builtin.file: state: absent path: "{{ ovnkey.path }}" diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-spice-encryption/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-spice-encryption/tasks/main.yml index d77981710de..8e98acb880d 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-spice-encryption/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-spice-encryption/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Create SPICE config file with the right permissions - file: + ansible.builtin.file: dest: '/etc/pki/tls/spice.cnf' state: touch mode: 0644 @@ -8,9 +8,9 @@ group: root - name: Put cipher string to SPICE config file - copy: + ansible.builtin.copy: # noqa template-instead-of-copy dest: '/etc/pki/tls/spice.cnf' - backup: yes + backup: true mode: 0644 content: | CipherString = {{ host_deploy_spice_cipher_string }} diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm-certificates/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm-certificates/tasks/main.yml index c6d921eefa2..06e883a51de 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm-certificates/tasks/main.yml +++ 
b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm-certificates/tasks/main.yml @@ -1,6 +1,6 @@ - block: - name: Create vdsm and QEMU key temporary files - tempfile: + ansible.builtin.tempfile: state: file suffix: '{{ item.suffix }}' with_items: @@ -13,7 +13,7 @@ register: vdsmkeys - name: Generate vdsm and QEMU CSRs - command: | + ansible.builtin.command: | '/usr/bin/openssl' 'req' '-new' @@ -28,7 +28,7 @@ register: csrs - name: Copy vdsm and QEMU CSRs - copy: + ansible.builtin.copy: # noqa template-instead-of-copy content: "{{ item.stdout }}" dest: "{{ ovirt_pki_dir }}/{{ item.item.item.req_dir }}/{{ ovirt_vds_hostname }}.req" mode: 0644 @@ -36,7 +36,7 @@ delegate_to: localhost - name: Run PKI enroll request for vdsm and QEMU - command: | + ansible.builtin.command: | "{{ ovirt_engine_usr }}/bin/pki-enroll-request.sh" "--name={{ ovirt_vds_hostname }}" "--subject=/O={{ ovirt_organizationname }}/CN={{ ovirt_vds_hostname }}{{ item.ou }}" @@ -59,11 +59,12 @@ delegate_to: localhost - name: Prepare directories for vdsm certificate files - file: + ansible.builtin.file: dest: "{{ filedest }}" state: directory owner: 'vdsm' group: 'kvm' + mode: 0755 with_items: - "{{ ovirt_vdsm_trust_store ~ '/' ~ ovirt_vdsm_ca_file | dirname }}" - "{{ ovirt_vdsm_trust_store ~ '/' ~ ovirt_vdsm_key_file | dirname }}" @@ -76,7 +77,7 @@ loop_var: filedest - name: Add vdsm cacert files - copy: + ansible.builtin.copy: # noqa template-instead-of-copy content: "{{ ovirt_ca_cert }}" dest: "{{ filedest }}" owner: 'root' @@ -90,7 +91,7 @@ loop_var: filedest - name: Add QEMU cacert file - copy: + ansible.builtin.copy: # noqa template-instead-of-copy content: "{{ ovirt_qemu_ca_cert }}" dest: "{{ ovirt_vdsm_trust_store }}/{{ ovirt_qemu_ca_file }}" owner: 'root' @@ -98,13 +99,13 @@ mode: 0644 - name: Add vdsm cert files - copy: + ansible.builtin.copy: src: "{{ ovirt_pki_dir }}/certs/{{ ovirt_vds_hostname }}.cer" dest: "{{ filedest }}" owner: 'root' group: 'kvm' mode: 0644 - 
remote_src: no + remote_src: false with_items: - "{{ ovirt_vdsm_trust_store }}/{{ ovirt_vdsm_cert_file }}" - "{{ ovirt_vdsm_trust_store }}/{{ ovirt_vdsm_spice_cert_file }}" @@ -113,32 +114,32 @@ loop_var: filedest - name: Add QEMU server cert file - copy: + ansible.builtin.copy: src: "{{ ovirt_pki_dir }}/certs-qemu/{{ ovirt_vds_hostname }}.cer" dest: "{{ ovirt_vdsm_trust_store }}/{{ ovirt_qemu_cert_file }}" owner: 'root' group: 'kvm' - remote_src: no + remote_src: false mode: preserve - name: Add QEMU client cert file link - file: + ansible.builtin.file: src: "{{ ovirt_qemu_cert_file | basename }}" dest: "{{ ovirt_vdsm_trust_store }}/{{ ovirt_qemu_client_cert_file }}" state: link - name: Set vdsm key path - set_fact: - vdsmkey_path: "{{ vdsmkeys.results[0].path }}" + ansible.builtin.set_fact: + vdsmkey_path: "{{ vdsmkeys.results[0].path }}" - name: Add vdsm key files - copy: + ansible.builtin.copy: src: "{{ vdsmkey_path }}" dest: "{{ filedest }}" owner: 'vdsm' group: 'kvm' mode: 0440 - remote_src: yes + remote_src: true with_items: - "{{ ovirt_vdsm_trust_store }}/{{ ovirt_vdsm_key_file }}" - "{{ ovirt_vdsm_trust_store }}/{{ ovirt_vdsm_spice_key_file }}" @@ -147,29 +148,29 @@ loop_var: filedest - name: Set QEMU key path - set_fact: - qemukey_path: "{{ vdsmkeys.results[1].path }}" + ansible.builtin.set_fact: + qemukey_path: "{{ vdsmkeys.results[1].path }}" - name: Add QEMU server key file - copy: + ansible.builtin.copy: src: "{{ qemukey_path }}" dest: "{{ ovirt_vdsm_trust_store }}/{{ ovirt_qemu_key_file }}" owner: 'root' group: 'qemu' mode: 0440 - remote_src: yes + remote_src: true - name: Add QEMU client key file link - file: + ansible.builtin.file: src: "{{ ovirt_qemu_key_file | basename }}" dest: "{{ ovirt_vdsm_trust_store }}/{{ ovirt_qemu_client_key_file }}" state: link - name: Populate service facts - service_facts: + ansible.builtin.service_facts: - name: Restart services - service: + ansible.builtin.service: name: "{{ item }}" state: restarted loop: @@ -179,7 
+180,7 @@ always: - name: Remove temp file - file: + ansible.builtin.file: state: absent path: "{{ item.path }}" loop: "{{ vdsmkeys.results }}" diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/defaults/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/defaults/main.yml index 8e4e03cb498..ab91bae0a0b 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/defaults/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/defaults/main.yml @@ -11,4 +11,4 @@ vendor_id_regexp: vendor_id\t+:\s\w* cpu_regexp: cpu\t+:\s\w* flags_regexp: flags\t+:\s.*\n features_regexp: features\t+:\s.*\n -platform_regexp: platform\t+:\s.*\n \ No newline at end of file +platform_regexp: platform\t+:\s.*\n diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/configure.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/configure.yml index c6668474db8..be064e93fec 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/configure.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/configure.yml @@ -12,12 +12,12 @@ # the local devices as FC storage in engine. 
- block: - - name: mask libvirt modular daemon units - systemd: + - name: Mask libvirt modular daemon units + ansible.builtin.systemd: name: "{{ item }}" state: stopped - enabled: no - masked: yes + enabled: false + masked: true with_items: - virtqemud.service - virtqemud.socket @@ -29,21 +29,21 @@ - virtstoraged.socket - virtproxyd.socket - - name: stop libvirtd - systemd: + - name: Stop libvirtd + ansible.builtin.systemd: name: "{{ item }}" state: stopped with_items: - libvirtd.service - libvirtd.socket - - name: remove modular sockets - shell: for i in virt{qemu,interface,network,nodedev,nwfilter,secret,storage,proxy}d libvirt; do rm -f /run/libvirt/${i}-*sock*; done; + - name: Remove modular sockets + ansible.builtin.shell: for i in virt{qemu,interface,network,nodedev,nwfilter,secret,storage,proxy}d libvirt; do rm -f /run/libvirt/${i}-*sock*; done; - - name: enable libvirtd monolithic daemon unit - systemd: + - name: Enable libvirtd monolithic daemon unit + ansible.builtin.systemd: name: "{{ item }}" - enabled: yes + enabled: true state: started with_items: - libvirtd.service @@ -51,52 +51,52 @@ # workaround for BZ 2054323 - name: Change the openvswitch_t to permissive - command: semanage permissive -a openvswitch_t - changed_when: True + ansible.builtin.command: semanage permissive -a openvswitch_t + changed_when: true - when: el_ver|int >= 9 + when: el_ver | int >= 9 - name: Configure LVM filter - command: vdsm-tool config-lvm-filter -y + ansible.builtin.command: vdsm-tool config-lvm-filter -y register: lvm_filter_result - when: el_ver|int >= 8 + when: el_ver | int >= 8 ignore_errors: true - name: Check for LVM filter configuration error - debug: + ansible.builtin.debug: msg: "[ERROR] Cannot configure LVM filter on host, please run: vdsm-tool config-lvm-filter" when: - - el_ver|int >= 8 + - el_ver | int >= 8 - lvm_filter_result.rc != 0 - name: Configure host for vdsm - command: vdsm-tool configure --force - changed_when: True + ansible.builtin.command: 
vdsm-tool configure --force + changed_when: true - name: Check existence of /etc/fapolicyd/rules.d directory - stat: + ansible.builtin.stat: path: /etc/fapolicyd/rules.d register: fapolicy_rules -- name: collect facts about system services - service_facts: +- name: Collect facts about system services + ansible.builtin.service_facts: - block: - - name: add vdsm-mom allow rule to fapolicy - copy: - dest: /etc/fapolicyd/rules.d/32-allow-vdsm-mom.rules - group: fapolicyd - mode: '0644' - remote_src: yes - content: | - allow perm=any trust=1 : dir=/etc/vdsm/mom.d/ ftype=text/x-lisp - allow perm=any trust=1 : dir=/var/tmp/ ftype=text/x-python + - name: Add vdsm-mom allow rule to fapolicy + ansible.builtin.copy: + dest: /etc/fapolicyd/rules.d/32-allow-vdsm-mom.rules + group: fapolicyd + mode: '0644' + remote_src: true + content: | + allow perm=any trust=1 : dir=/etc/vdsm/mom.d/ ftype=text/x-lisp + allow perm=any trust=1 : dir=/var/tmp/ ftype=text/x-python - - name: restart fapolicy service - systemd: - state: restarted - name: fapolicyd + - name: Restart fapolicy service + ansible.builtin.systemd: + state: restarted + name: fapolicyd when: - fapolicy_rules.stat.exists diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/libvirt_presets_configure.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/libvirt_presets_configure.yml index 4ac47109956..c5c6043d1ca 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/libvirt_presets_configure.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/libvirt_presets_configure.yml @@ -1,6 +1,6 @@ - block: - - name: create custom libvirt preset - copy: + - name: Create custom libvirt preset + ansible.builtin.copy: dest: /usr/lib/systemd/system-preset/88-ovirt-monolithic-libvirt.preset content: | # this is oVirt internal configuration and should not be updated manually @@ 
-14,5 +14,5 @@ disable virtstoraged.socket disable virtproxyd.socket enable libvirtd.service - - when: el_ver|int >= 9 + mode: 0644 + when: el_ver | int >= 9 diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/main.yml index 652046673bc..92978c0fea0 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Include packages, vdsmid, pki, configure, and restart services tasks - include_tasks: "{{ item }}.yml" + ansible.builtin.include_tasks: "{{ item }}.yml" with_items: - libvirt_presets_configure - packages diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/packages.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/packages.yml index ed3ae0f5dbe..1d6fe79b428 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/packages.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/packages.yml @@ -4,25 +4,21 @@ # on EL7, because yum on EL7 doesn't contain --nobest option # We need to skip on non x86_64 architecture, because HE is not supported there - name: Install ovirt-hosted-engine-setup package - yum: + ansible.builtin.yum: name: ovirt-hosted-engine-setup state: present when: ansible_architecture == 'x86_64' - tags: - - skip_ansible_lint # E403 - name: Install ovirt-host package - yum: + ansible.builtin.yum: name: ovirt-host state: present - tags: - - skip_ansible_lint # E403 - name: Get packages - package_facts: + ansible.builtin.package_facts: manager: rpm - name: Verify minimum vdsm version exists - fail: + ansible.builtin.fail: msg: "Minimum version required for vdsm is {{ 
host_deploy_vdsm_min_version }}" - when: host_deploy_vdsm_min_version|float < ansible_facts.packages['vdsm'][0].version|float + when: host_deploy_vdsm_min_version | float < ansible_facts.packages['vdsm'][0].version | float diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/pki.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/pki.yml index 933b4c38a5d..f13423b4b4c 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/pki.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/pki.yml @@ -1,4 +1,4 @@ - name: Run vdsm-certificates role - include_role: + ansible.builtin.include_role: name: ovirt-host-deploy-vdsm-certificates - public: yes + public: true diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/restart_services.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/restart_services.yml index 32968e242af..69d0b46808e 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/restart_services.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/restart_services.yml @@ -1,38 +1,38 @@ --- -- name: populate service facts - service_facts: +- name: Populate service facts + ansible.builtin.service_facts: - name: Stop services - service: + ansible.builtin.service: name: "{{ item }}" state: stopped loop: - - vdsmd.service - - supervdsmd.service - - libvirtd.service - - ovirt-imageio.service + - vdsmd.service + - supervdsmd.service + - libvirtd.service + - ovirt-imageio.service when: "item in ansible_facts.services" - name: Start and enable services - service: + ansible.builtin.service: name: "{{ item }}" state: started - enabled: yes + enabled: true loop: # vdsmd will start all its dependent services stopped earlier, libvirtd, ovirt-imageio, 
supervdsmd - - cgconfig.service - - messagebus.service - - vdsmd.service + - cgconfig.service + - messagebus.service + - vdsmd.service when: "item in ansible_facts.services" - name: Reload NetworkManager config - command: nmcli general reload conf + ansible.builtin.command: nmcli general reload conf when: - "'NetworkManager.service' in ansible_facts.services" - el_ver|int >= 8 - name: Restart NetworkManager service - service: + ansible.builtin.service: name: NetworkManager.service state: restarted when: diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/vdsmid.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/vdsmid.yml index 59accfefded..27d4deb8b9d 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/vdsmid.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vdsm/tasks/vdsmid.yml @@ -3,75 +3,75 @@ # NOTE: This must be done before configuring vdsm. 
# - name: Check if vdsm id exists - stat: + ansible.builtin.stat: path: "{{ vdsm_id_file }}" ignore_errors: true register: vdsm_id_stat - block: - - name: Get vdsm id - shell: cat "{{ vdsm_id_file }}" - register: vdsm_id_file_content - tags: - - skip_ansible_lint # E305 + - name: Get vdsm id + ansible.builtin.command: + argv: + - cat + - "{{ vdsm_id_file }}" + register: vdsm_id_file_content - - name: Set vdsm id - set_fact: - vdsm_id: "{{ vdsm_id_file_content.stdout }}" + - name: Set vdsm id + ansible.builtin.set_fact: + vdsm_id: "{{ vdsm_id_file_content.stdout }}" when: vdsm_id_stat.stat.exists - block: - - name: Install dmidecode package - yum: - name: dmidecode - state: present + - name: Install dmidecode package + ansible.builtin.yum: + name: dmidecode + state: present - - name: Detect vdsm id for x86_64 or i686 - shell: dmidecode -s system-uuid - register: vdsm_id_res - tags: - - skip_ansible_lint # E305 + - name: Detect vdsm id for x86_64 or i686 + ansible.builtin.command: dmidecode -s system-uuid + register: vdsm_id_res - - name: Set vdsm id for x86_64 or i686 - set_fact: - vdsm_id: "{{ vdsm_id_res.stdout }}" - when: - - vdsm_id_res.rc == 0 - - vdsm_id_res.stdout_lines|length == 1 - - not vdsm_id_res.stdout_lines[0]|regex_search(not_regexp) + - name: Set vdsm id for x86_64 or i686 + ansible.builtin.set_fact: + vdsm_id: "{{ vdsm_id_res.stdout }}" + when: + - vdsm_id_res.rc == 0 + - vdsm_id_res.stdout_lines|length == 1 + - not vdsm_id_res.stdout_lines[0]|regex_search(not_regexp) when: - not vdsm_id_stat.stat.exists - not vdsm_id - (ansible_facts.architecture == 'x86_64' or ansible_facts.architecture == 'i686') - block: - - name: Verify ppc system id path exists - stat: - path: "{{ ppc_system_id }}" - register: stat_results + - name: Verify ppc system id path exists + ansible.builtin.stat: + path: "{{ ppc_system_id }}" + register: stat_results - - name: Detect vdsm id for ppc or ppc64 - shell: cat "{{ ppc_system_id }}" - register: vdsm_id_res - when: 
stat_results.stat.exists - tags: - - skip_ansible_lint # E305 + - name: Detect vdsm id for ppc or ppc64 + ansible.builtin.command: + argv: + - cat + - "{{ ppc_system_id }}" + register: vdsm_id_res + when: stat_results.stat.exists - - name: Set vdsm id for ppc or ppc64 - set_fact: - vdsm_id: "{{ vdsm_id_res.stdout|replace(',', '') }}" - when: stat_results.stat.exists + - name: Set vdsm id for ppc or ppc64 + ansible.builtin.set_fact: + vdsm_id: "{{ vdsm_id_res.stdout | replace(',', '') }}" + when: stat_results.stat.exists when: - not vdsm_id - (ansible_facts.architecture == 'ppc' or ansible_facts.architecture == 'ppc64') - name: Set vdsm id if it wasn't found - set_fact: - vdsm_id: "{{ 9999999999999999999999|random|to_uuid }}" + ansible.builtin.set_fact: + vdsm_id: "{{ 9999999999999999999999 | random | to_uuid }}" when: not vdsm_id - name: Store vdsm id - copy: + ansible.builtin.copy: # noqa template-instead-of-copy content: "{{ vdsm_id }}" dest: "{{ vdsm_id_file }}" owner: root @@ -82,24 +82,24 @@ # to load the VDSM id to the engine. 
# See org.ovirt.engine.core.common.utils.ansible.AnsibleConstants - name: Fetch vdsm id - slurp: + ansible.builtin.slurp: src: "{{ vdsm_id_file }}" - name: Create vdsm.conf content - set_fact: + ansible.builtin.set_fact: vdsm_config_prefix: "[vars]\nssl= {{ host_deploy_vdsm_encrypt_host_communication }} \n" - name: Configure SSL ciphers for EL7 - set_fact: + ansible.builtin.set_fact: vdsm_config_prefix: "{{ vdsm_config_prefix }}ssl_ciphers= {{ host_deploy_vdsm_ssl_ciphers }} \n" - when: el_ver|int == 7 + when: el_ver | int == 7 - name: Add adresses to vdsm.conf - set_fact: + ansible.builtin.set_fact: vdsm_config_prefix: "{{ vdsm_config_prefix }} \n[addresses]\nmanagement_port= {{ host_deploy_vdsm_port }} \n" - name: Copy vdsm config prefix to vdsm.conf - copy: + ansible.builtin.copy: # noqa template-instead-of-copy content: "{{ vdsm_config_prefix }}" dest: "{{ vdsm_config_file }}" owner: root diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vm-console-certificates/defaults/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vm-console-certificates/defaults/main.yml index 09b0a78d765..ede4e945908 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vm-console-certificates/defaults/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vm-console-certificates/defaults/main.yml @@ -13,4 +13,4 @@ ovirt_vmconsole_key_file: '{{ ovirt_vmconsole_store }}/host-ssh_host_rsa' # vm-console private key variables: ovirt_vmconsole_key_size: 2048 -ovirt_vmconsole_key_type: rsa \ No newline at end of file +ovirt_vmconsole_key_type: rsa diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vm-console-certificates/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vm-console-certificates/tasks/main.yml index d1dcaae4520..e732ae347a6 100644 --- 
a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vm-console-certificates/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vm-console-certificates/tasks/main.yml @@ -1,83 +1,83 @@ - name: Find ovirt-vmconsole user - getent: + ansible.builtin.getent: database: passwd key: ovirt-vmconsole - block: - - name: Create vmconsole key temporary file - tempfile: - state: file - suffix: vmconsole - register: vmconsolekey + - name: Create vmconsole key temporary file + ansible.builtin.tempfile: + state: file + suffix: vmconsole + register: vmconsolekey - - name: Generate vmconsole CSR - command: | - '/usr/bin/openssl' - 'req' - '-new' - '-newkey' - '{{ ovirt_vmconsole_key_type }}:{{ ovirt_vmconsole_key_size }}' - '-nodes' - '-subj' - '/' - '-keyout' - '{{ vmconsolekey.path }}' - register: csr + - name: Generate vmconsole CSR + ansible.builtin.command: | + '/usr/bin/openssl' + 'req' + '-new' + '-newkey' + '{{ ovirt_vmconsole_key_type }}:{{ ovirt_vmconsole_key_size }}' + '-nodes' + '-subj' + '/' + '-keyout' + '{{ vmconsolekey.path }}' + register: csr - - name: Copy vmconsole CSR - copy: - content: "{{ csr.stdout }}" - dest: "{{ ovirt_pki_dir }}/requests/{{ ovirt_vds_hostname }}-ssh.req" - mode: 0644 - delegate_to: localhost + - name: Copy vmconsole CSR + ansible.builtin.copy: # noqa template-instead-of-copy + content: "{{ csr.stdout }}" + dest: "{{ ovirt_pki_dir }}/requests/{{ ovirt_vds_hostname }}-ssh.req" + mode: 0644 + delegate_to: localhost - - name: Run PKI enroll request for vmconsole - command: | - "{{ ovirt_engine_usr }}/bin/pki-enroll-request.sh" - "--name={{ ovirt_vds_hostname }}-ssh" - "--subject=/O={{ ovirt_organizationname }}/CN={{ ovirt_vds_hostname }}" - "--days={{ ovirt_vds_certificate_validity_in_days }}" - "--timeout={{ ovirt_signcerttimeoutinseconds }}" - delegate_to: localhost + - name: Run PKI enroll request for vmconsole + ansible.builtin.command: | + "{{ ovirt_engine_usr 
}}/bin/pki-enroll-request.sh" + "--name={{ ovirt_vds_hostname }}-ssh" + "--subject=/O={{ ovirt_organizationname }}/CN={{ ovirt_vds_hostname }}" + "--days={{ ovirt_vds_certificate_validity_in_days }}" + "--timeout={{ ovirt_signcerttimeoutinseconds }}" + delegate_to: localhost - - name: Run PKI enroll OpenSSH cert for vmconsole - command: | - "{{ ovirt_engine_usr }}/bin/pki-enroll-openssh-cert.sh" - "--name={{ ovirt_vds_hostname }}-ssh" - "--host" - "--id={{ ovirt_vds_hostname }}" - "--principals={{ ovirt_vds_hostname }}" - "--days={{ ovirt_vds_certificate_validity_in_days }}" - delegate_to: localhost + - name: Run PKI enroll OpenSSH cert for vmconsole + ansible.builtin.command: | + "{{ ovirt_engine_usr }}/bin/pki-enroll-openssh-cert.sh" + "--name={{ ovirt_vds_hostname }}-ssh" + "--host" + "--id={{ ovirt_vds_hostname }}" + "--principals={{ ovirt_vds_hostname }}" + "--days={{ ovirt_vds_certificate_validity_in_days }}" + delegate_to: localhost - - name: Add vmconsole cacert files - copy: - content: "{{ ovirt_ca_key }}" - dest: "{{ ovirt_vmconsole_ca_file }}" - mode: 0644 + - name: Add vmconsole cacert files + ansible.builtin.copy: # noqa template-instead-of-copy + content: "{{ ovirt_ca_key }}" + dest: "{{ ovirt_vmconsole_ca_file }}" + mode: 0644 - - name: Add cert files - copy: - src: "{{ ovirt_pki_dir }}/certs/{{ ovirt_vds_hostname }}-ssh-cert.pub" - dest: "{{ ovirt_vmconsole_cert_file }}" - remote_src: no - mode: 0644 + - name: Add cert files + ansible.builtin.copy: + src: "{{ ovirt_pki_dir }}/certs/{{ ovirt_vds_hostname }}-ssh-cert.pub" + dest: "{{ ovirt_vmconsole_cert_file }}" + remote_src: false + mode: 0644 - - name: Set vmconsole key path - set_fact: - vmconsolekey_path: "{{ vmconsolekey.path }}" + - name: Set vmconsole key path + ansible.builtin.set_fact: + vmconsolekey_path: "{{ vmconsolekey.path }}" - - name: Add vmconsole key file - copy: - src: "{{ vmconsolekey_path }}" - dest: "{{ ovirt_vmconsole_key_file }}" - owner: 'ovirt-vmconsole' - group: 
'ovirt-vmconsole' - mode: 0400 - remote_src: yes + - name: Add vmconsole key file + ansible.builtin.copy: + src: "{{ vmconsolekey_path }}" + dest: "{{ ovirt_vmconsole_key_file }}" + owner: 'ovirt-vmconsole' + group: 'ovirt-vmconsole' + mode: 0400 + remote_src: true always: - - name: Remove temp file - file: - state: absent - path: "{{ vmconsolekey.path }}" + - name: Remove temp file + ansible.builtin.file: + state: absent + path: "{{ vmconsolekey.path }}" diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vm-console/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vm-console/tasks/main.yml index 907cc578103..a78e48bc0cd 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vm-console/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vm-console/tasks/main.yml @@ -1,23 +1,20 @@ --- - name: Install ovirt-vmconsole-host package - yum: + ansible.builtin.yum: name: ovirt-vmconsole-host state: present - tags: - - skip_ansible_lint # E403 - name: Run vm-console-certificates role - include_role: + ansible.builtin.include_role: name: ovirt-host-deploy-vm-console-certificates - public: yes + public: true - name: Populate service facts - service_facts: + ansible.builtin.service_facts: - name: Start ovirt-vmconsole-host-sshd - service: + ansible.builtin.service: name: ovirt-vmconsole-host-sshd state: restarted - enabled: yes + enabled: true when: "'ovirt-vmconsole-host-sshd.service' in ansible_facts.services" - diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vnc-certificates/handlers/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vnc-certificates/handlers/main.yml index f7fbacb58d4..6d547b7cc0a 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vnc-certificates/handlers/main.yml +++ 
b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vnc-certificates/handlers/main.yml @@ -1,8 +1,8 @@ # libvirtd may not be started automatically on hosts >= 4.4 if not # already running. Let's restart it, all dependent services will follow -- name: restart libvirtd - service: +- name: Restart libvirtd + ansible.builtin.service: name: libvirtd state: restarted - when: host_deploy_vnc_restart_services|bool + when: host_deploy_vnc_restart_services | bool listen: "restart libvirtd" diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vnc-certificates/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vnc-certificates/tasks/main.yml index 2cd69d9dbbe..0032de0746a 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vnc-certificates/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy-vnc-certificates/tasks/main.yml @@ -1,7 +1,7 @@ --- - block: - name: Create cert dir - file: + ansible.builtin.file: path: "{{ host_deploy_vnc_tls_x509_cert_dir }}" state: directory owner: vdsm @@ -9,12 +9,12 @@ mode: 0755 - name: Setup VNC PKI - copy: + ansible.builtin.copy: src: "{{ host_deploy_spice_tls_x509_cert_dir }}/{{ item }}" dest: "{{ host_deploy_vnc_tls_x509_cert_dir }}/{{ item }}" group: kvm owner: root - remote_src: yes + remote_src: true mode: preserve with_items: - "ca-cert.pem" @@ -22,14 +22,14 @@ - "server-key.pem" - name: Set server-key permissions - file: + ansible.builtin.file: path: "{{ host_deploy_vnc_tls_x509_cert_dir }}/server-key.pem" owner: vdsm group: kvm mode: 0440 - name: Modify qemu config file - enable TLS - blockinfile: + ansible.builtin.blockinfile: path: '/etc/libvirt/qemu.conf' state: present marker: '## {mark} of configuration section for VNC encryption' @@ -38,12 +38,12 @@ block: | vnc_tls=1 vnc_tls_x509_cert_dir="{{ host_deploy_vnc_tls_x509_cert_dir }}" - when: host_deploy_vnc_tls|bool 
+ when: host_deploy_vnc_tls | bool notify: restart libvirtd - name: Modify qemu config file - disable TLS - blockinfile: + ansible.builtin.blockinfile: path: '/etc/libvirt/qemu.conf' state: absent marker: '## {mark} of configuration section for VNC encryption' @@ -52,7 +52,6 @@ block: | vnc_tls=1 vnc_tls_x509_cert_dir="{{ host_deploy_vnc_tls_x509_cert_dir }}" - when: not host_deploy_vnc_tls|bool + when: not host_deploy_vnc_tls | bool notify: restart libvirtd - diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy/meta/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy/meta/main.yml index 4c6dd9052a2..dc9d6cfd0d7 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy/meta/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-deploy/meta/main.yml @@ -1,13 +1,13 @@ dependencies: - - ovirt-host-deploy-facts - - ovirt-host-deploy-vdsm - - ovirt-host-deploy-misc - - ovirt-host-deploy-ovn-certificates - - ovirt-provider-ovn-driver - - ovirt-host-deploy-libvirt-guests - - ovirt-host-deploy-firewalld - - ovirt-host-deploy-vnc-certificates - - ovirt-host-deploy-kernel - - ovirt-host-deploy-vm-console - - ovirt-host-deploy-iptables - - ovirt-host-deploy-hosted-engine + - role: ovirt-host-deploy-facts + - role: ovirt-host-deploy-vdsm + - role: ovirt-host-deploy-misc + - role: ovirt-host-deploy-ovn-certificates + - role: ovirt-provider-ovn-driver + - role: ovirt-host-deploy-libvirt-guests + - role: ovirt-host-deploy-firewalld + - role: ovirt-host-deploy-vnc-certificates + - role: ovirt-host-deploy-kernel + - role: ovirt-host-deploy-vm-console + - role: ovirt-host-deploy-iptables + - role: ovirt-host-deploy-hosted-engine diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/defaults/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/defaults/main.yml index 
c6ec676f7aa..ee6871757ad 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/defaults/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/defaults/main.yml @@ -3,4 +3,4 @@ ovirt_vdsm_config_file: '/etc/vdsm/vdsm.conf' # Certificate paths: ## vdsm -ovirt_vdsm_trust_store: '/etc/pki/vdsm' \ No newline at end of file +ovirt_vdsm_trust_store: '/etc/pki/vdsm' diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/loadconfig.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/loadconfig.yml index 46b6e631fc1..1287c1b3278 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/loadconfig.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/loadconfig.yml @@ -1,30 +1,30 @@ - name: Check if VDSM config file exists - stat: + ansible.builtin.stat: path: "{{ ovirt_vdsm_config_file }}" register: vdsm_conf_stat - block: - - name: Temporary file for vdsm configuration - tempfile: - state: file - suffix: tempvdsmtrustore - register: tempvdsmconf - delegate_to: localhost + - name: Temporary file for vdsm configuration + ansible.builtin.tempfile: + state: file + suffix: tempvdsmtrustore + register: tempvdsmconf + delegate_to: localhost - - name: Load vdsm trust store location - fetch: - src: "{{ ovirt_vdsm_config_file }}" - dest: "{{ tempvdsmconf.path }}" - flat: yes + - name: Load vdsm trust store location + ansible.builtin.fetch: + src: "{{ ovirt_vdsm_config_file }}" + dest: "{{ tempvdsmconf.path }}" + flat: true - - name: Set VDSM truststore path - set_fact: - ovirt_vdsm_trust_store: "{{ lookup('ini', 'trust_store_path section=vars file=' ~ tempvdsmconf.path) or ovirt_vdsm_trust_store }}" + - name: Set VDSM truststore path + ansible.builtin.set_fact: + ovirt_vdsm_trust_store: "{{ lookup('ini', 
'trust_store_path section=vars file=' ~ tempvdsmconf.path) or ovirt_vdsm_trust_store }}" when: vdsm_conf_stat.stat.exists always: - name: Remove temp VDSM config file - file: + ansible.builtin.file: state: absent path: "{{ tempvdsmconf.path }}" when: tempvdsmconf is defined diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/main.yml index bc1781df2dc..697a8256b74 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/main.yml @@ -1,4 +1,4 @@ -- include_tasks: "{{ item }}.yml" +- ansible.builtin.include_tasks: "{{ item }}.yml" with_items: - loadconfig - vdsm diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/ovn.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/ovn.yml index 62e675c504d..457aecc72c1 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/ovn.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/ovn.yml @@ -1,4 +1,4 @@ - name: Run ovn-certificates role - include_role: + ansible.builtin.include_role: name: ovirt-host-deploy-ovn-certificates - public: yes + public: true diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/vdsm.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/vdsm.yml index 933b4c38a5d..f13423b4b4c 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/vdsm.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/vdsm.yml @@ -1,4 +1,4 @@ - name: Run 
vdsm-certificates role - include_role: + ansible.builtin.include_role: name: ovirt-host-deploy-vdsm-certificates - public: yes + public: true diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/vmconsole.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/vmconsole.yml index c7d05bf094a..99226de9e08 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/vmconsole.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/vmconsole.yml @@ -1,4 +1,4 @@ - name: Run vm-console-certificates role - include_role: + ansible.builtin.include_role: name: ovirt-host-deploy-vm-console-certificates - public: yes + public: true diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/vnc.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/vnc.yml index e6d31f405e2..70e0185b8a5 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/vnc.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-enroll-certificate/tasks/vnc.yml @@ -1,4 +1,4 @@ - name: Run vnc-certificates role - include_role: + ansible.builtin.include_role: name: ovirt-host-deploy-vnc-certificates - public: yes + public: true diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-fix-encrypted-migrations/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-fix-encrypted-migrations/tasks/main.yml index 4c124ccf416..b42f0de4575 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-fix-encrypted-migrations/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-fix-encrypted-migrations/tasks/main.yml @@ -1,12 +1,12 @@ --- - name: Add QEMU migration client cert file link - 
file: + ansible.builtin.file: src: server-cert.pem dest: /etc/pki/vdsm/libvirt-migrate/client-cert.pem state: link - name: Add QEMU migration client key file link - file: + ansible.builtin.file: src: server-key.pem dest: /etc/pki/vdsm/libvirt-migrate/client-key.pem state: link diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-setup-vnc-sasl/handlers/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-setup-vnc-sasl/handlers/main.yml index 101f35a91c8..62f518dcb11 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-setup-vnc-sasl/handlers/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-setup-vnc-sasl/handlers/main.yml @@ -1,14 +1,14 @@ -- name: populate service facts and restart libvirtd - service_facts: +- name: Populate service facts and restart libvirtd + ansible.builtin.service_facts: register: services_in_vnc_sasl - changed_when: True + changed_when: true notify: restart libvirtd # libvirtd may not be started automatically on hosts >= 4.4 if not # already running. 
-- name: restart libvirtd - service: +- name: Restart libvirtd + ansible.builtin.service: name: libvirtd state: restarted when: "services_in_vnc_sasl['ansible_facts']['services'].get('libvirtd.service', {}).get('state') == 'running'" diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-setup-vnc-sasl/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-setup-vnc-sasl/tasks/main.yml index 83afcdec556..913bf39f9d5 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-setup-vnc-sasl/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-setup-vnc-sasl/tasks/main.yml @@ -1,30 +1,33 @@ --- - name: Create SASL QEMU config file - blockinfile: + ansible.builtin.blockinfile: path: '/etc/sasl2/qemu.conf' state: present - create: yes + create: true block: | mech_list: scram-sha-256 sasldb_path: /etc/sasl2/vnc_passwd.db + mode: "0644" - name: Use saslpasswd2 to create file with dummy user - command: saslpasswd2 -a dummy_db -f /etc/sasl2/vnc_passwd.db dummy_user -p + ansible.builtin.command: saslpasswd2 -a dummy_db -f /etc/sasl2/vnc_passwd.db dummy_user -p args: stdin: dummy_password creates: '/etc/sasl2/vnc_passwd.db' - name: Set ownership of the password db - file: + ansible.builtin.file: group: 'qemu' owner: 'qemu' state: 'file' path: '/etc/sasl2/vnc_passwd.db' + mode: "0640" - name: Modify qemu config file - enable VNC SASL authentication - lineinfile: + ansible.builtin.lineinfile: path: '/etc/libvirt/qemu.conf' state: present line: 'vnc_sasl=1' + mode: "0644" - notify: populate service facts and restart libvirtd + notify: Populate service facts and restart libvirtd diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-upgrade/tasks/advanced-virtualization.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-upgrade/tasks/advanced-virtualization.yml index 339ef0b758c..8d127ce5abb 100644 ---
a/packaging/ansible-runner-service-project/project/roles/ovirt-host-upgrade/tasks/advanced-virtualization.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-upgrade/tasks/advanced-virtualization.yml @@ -1,31 +1,29 @@ --- - block: - name: Fetch available redhat-release versions - dnf: + ansible.builtin.dnf: list: redhat-release - update_cache: yes + update_cache: true register: available_releases - name: Find latest available redhat-release version - set_fact: + ansible.builtin.set_fact: latest_rh_rel: "{{ item.version }}" loop: "{{ available_releases.results }}" - when: latest_rh_rel |default('0') is version(item.version, '<') + when: latest_rh_rel | default('0') is version(item.version, '<') - block: - name: Reset configuration of advanced virtualization module - shell: dnf -y module reset virt - tags: - - skip_ansible_lint # E305 + ansible.builtin.command: dnf -y module reset virt - name: Set virt module version for RHEL 8.6 and newer - set_fact: + ansible.builtin.set_fact: virt_mod_ver: "rhel" when: - latest_rh_rel is version('8.6', '>=') - name: Set virt module version for RHEL 8.4 and 8.5 - set_fact: + ansible.builtin.set_fact: virt_mod_ver: "av" when: - virt_mod_ver is not defined @@ -33,16 +31,20 @@ - latest_rh_rel is version('8.5', '<=') - name: Set virt module version for RHEL 8.3 and older - set_fact: + ansible.builtin.set_fact: virt_mod_ver: "{{ latest_rh_rel }}" when: - virt_mod_ver is not defined - latest_rh_rel is version('8.3', '<=') - name: Enable advanced virtualization module for relevant OS version - shell: "dnf -y module enable virt:{{ virt_mod_ver }}" - tags: - - skip_ansible_lint # E305 + ansible.builtin.command: + argv: + - dnf + - -y + - module + - enable + - "virt:{{ virt_mod_ver }}" when: - ansible_facts.packages['redhat-release'] is defined @@ -50,6 +52,6 @@ when: - host_deploy_origin_type != ovirt_origin_type - - not node_host|bool + - not node_host | bool - el_ver|int >= 8 - ansible_distribution_version is 
version('8.6', '<') # no need to update virt module version when we are on 8.6+ diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-host-upgrade/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-host-upgrade/tasks/main.yml index 30761d31a70..de87773d8fb 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-host-upgrade/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-host-upgrade/tasks/main.yml @@ -6,10 +6,12 @@ # tag. - name: Configure advanced virtualization - import_tasks: advanced-virtualization.yml + ansible.builtin.import_tasks: advanced-virtualization.yml - name: Remove network-scripts-openvswitch 2.11 package - shell: rpm -e --nodeps network-scripts-openvswitch2.11 + ansible.builtin.yum: # noqa ignore-errors + name: network-scripts-openvswitch2.11 + state: absent ignore_errors: true when: - el_ver|int == 8 @@ -17,84 +19,79 @@ - ovirt_openvswitch_pre.version is version ('2.11', '==') - name: Install ovirt-host package if it isn't installed - yum: + ansible.builtin.yum: name: ovirt-host state: present - update_cache: yes + update_cache: true lock_timeout: 300 tags: - updatecheck - - skip_ansible_lint # E403 # We need to check for updates after we enable new advanced virt module - name: Check for updated packages - import_role: + ansible.builtin.import_role: name: ovirt-host-check-upgrade - name: Prepare NGN host for upgrade - file: + ansible.builtin.file: path: /var/imgbased/.image-updated state: absent - when: node_host|bool + when: node_host | bool - name: Get installed version of vdsm - set_fact: + ansible.builtin.set_fact: previous_vdsm_package: "{{ ansible_facts.packages['vdsm'] | first }}" - name: Clean yum cache - command: - cmd: yum clean packages - ignore_errors: yes + ansible.builtin.command: yum clean packages # noqa ignore-errors + ignore_errors: true - name: Upgrade packages - yum: + ansible.builtin.yum: # noqa package-latest name: '*' state: 
latest register: updated_packages - when: not node_host| bool - tags: - - skip_ansible_lint # E403 + when: not node_host | bool -- name: populate service facts - service_facts: +- name: Populate service facts + ansible.builtin.service_facts: - name: Stop services - service: + ansible.builtin.service: name: "{{ item }}" state: stopped loop: - - ovirt-ha-agent.service - - ovirt-ha-broker.service + - ovirt-ha-agent.service + - ovirt-ha-broker.service when: "item in ansible_facts.services" ## Ignoring empty lines and the title "Obsoleting packages" - name: Upgrade packages - yum: + ansible.builtin.yum: # noqa package-latest name: "{{ item }}" state: latest lock_timeout: 300 conf_file: /tmp/yum.conf loop: "{{ yum_result.split('\n') }}" tags: - - updatecheck - - skip_ansible_lint # E403 + - updatecheck register: updated_packages - when: node_host| bool + when: node_host | bool - name: Start ovirt-ha-agent service - service: + ansible.builtin.service: name: ovirt-ha-agent - enabled: yes + enabled: true state: restarted when: ansible_facts.services["ovirt-ha-agent.service"] is defined - name: Check if image was updated - set_fact: + ansible.builtin.set_fact: image_pkg_updated: "{{ yum_result is search('image-update') }}" - when: node_host|bool + when: node_host | bool - name: Check if image-updated file exists - stat: + ansible.builtin.stat: path: /var/imgbased/.image-updated register: image_updated_file when: @@ -102,7 +99,7 @@ - image_pkg_updated|bool - name: Verify image was updated successfully - fail: + ansible.builtin.fail: msg: "Node image upgrade failed" when: - node_host|bool @@ -111,52 +108,52 @@ - host_deploy_cluster_version|float >= 4.4 - name: Configure LVM filter - command: vdsm-tool config-lvm-filter -y + ansible.builtin.command: vdsm-tool config-lvm-filter -y register: lvm_filter_result - when: el_ver|int >= 8 + when: el_ver | int >= 8 ignore_errors: true - name: Error configuring LVM filter - debug: + ansible.builtin.debug: msg: "[ERROR] Cannot configure 
LVM filter on host, please run: vdsm-tool config-lvm-filter" when: - el_ver|int >= 8 - lvm_filter_result.rc != 0 - name: Gather packages facts - package_facts: + ansible.builtin.package_facts: manager: rpm - name: Get upgraded version of vdsm - set_fact: + ansible.builtin.set_fact: upgraded_vdsm_package: "{{ ansible_facts.packages['vdsm'] | first }}" - name: Clean up no longer supported abrt integration block: - - name: Check existence of custom abrt coredump configuration - stat: - path: /etc/sysctl.d/50-coredump.conf - register: link - - - name: Remove custom abrt coredump configuration - file: - path: /etc/sysctl.d/50-coredump.conf - state: absent - when: - - link.stat.islnk is defined - - link.stat.lnk_source=="/dev/null" - - - name: Remove abrt packages - dnf: - name: - - abrt* - state: absent - when: "'abrt' in ansible_facts.packages" - - - name: Reload coredump pattern kernel configuration - command: /sbin/sysctl -p --system + - name: Check existence of custom abrt coredump configuration + ansible.builtin.stat: + path: /etc/sysctl.d/50-coredump.conf + register: link + + - name: Remove custom abrt coredump configuration + ansible.builtin.file: + path: /etc/sysctl.d/50-coredump.conf + state: absent + when: + - link.stat.islnk is defined + - link.stat.lnk_source=="/dev/null" + + - name: Remove abrt packages + ansible.builtin.dnf: + name: + - abrt* + state: absent + when: "'abrt' in ansible_facts.packages" + + - name: Reload coredump pattern kernel configuration + ansible.builtin.command: /sbin/sysctl -p --system when: - - previous_vdsm_package.version is version('4.50', '<') - - upgraded_vdsm_package.version is version('4.50', '>=') + - previous_vdsm_package.version is version('4.50', '<') + - upgraded_vdsm_package.version is version('4.50', '>=') diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-image-measure/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-image-measure/tasks/main.yml index 
c6bd94acb23..8f2205a9459 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-image-measure/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-image-measure/tasks/main.yml @@ -1,8 +1,6 @@ --- - name: Measure an image - command: qemu-img measure -O qcow2 {{ image_path }} + ansible.builtin.command: qemu-img measure -O qcow2 {{ image_path }} register: measurement_result - become: yes + become: true become_user: vdsm - tags: - - skip_ansible_lint # E301 diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-ova-export-post-pack/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-ova-export-post-pack/tasks/main.yml index 03ea084afb6..717e343053a 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-ova-export-post-pack/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-ova-export-post-pack/tasks/main.yml @@ -1,17 +1,17 @@ --- - name: Rename the OVA file - command: mv "{{ ova_file.dest }}" "{{ target_directory }}/{{ ova_name }}" + ansible.builtin.command: mv "{{ ova_file.dest }}" "{{ target_directory }}/{{ ova_name }}" async: "{{ ansible_timeout }}" poll: 15 when: packing_result.rc is defined and packing_result.rc == 0 - name: Remove the temporary file - file: + ansible.builtin.file: path: "{{ ova_file.dest }}" state: absent when: packing_result.rc is defined and packing_result.rc != 0 - name: Check OVA creation process result - fail: + ansible.builtin.fail: msg: "Failed to create OVA file" when: packing_result.rc is defined and packing_result.rc != 0 diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-ova-export-pre-pack/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-ova-export-pre-pack/tasks/main.yml index a4650d4cb36..6f6b1c1843a 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-ova-export-pre-pack/tasks/main.yml +++ 
b/packaging/ansible-runner-service-project/project/roles/ovirt-ova-export-pre-pack/tasks/main.yml @@ -1,40 +1,40 @@ --- - name: Examine target directory - stat: + ansible.builtin.stat: path: "{{ target_directory }}" register: target_directory_stats - name: Fail if target directory does not exist - fail: + ansible.builtin.fail: msg: "Target directory does not exist" when: not target_directory_stats.stat.exists - name: Fail if target directory is not a directory - fail: + ansible.builtin.fail: msg: "Target directory is not a directory" when: not target_directory_stats.stat.isdir - name: Fail if target directory is not writeable - fail: + ansible.builtin.fail: msg: "Target directory is not writeable" when: not target_directory_stats.stat.writeable - name: Removing the temporary file - file: + ansible.builtin.file: path: "{{ target_directory }}/{{ ova_name }}.tmp" state: absent when: validate_only is not defined - name: Allocating the temporary path for the OVA file - command: xfs_mkfile -n "{{ ova_size }}" "{{ target_directory }}/{{ ova_name }}.tmp" + ansible.builtin.command: xfs_mkfile -n "{{ ova_size }}" "{{ target_directory }}/{{ ova_name }}.tmp" async: "{{ ansible_timeout }}" poll: 15 when: validate_only is not defined - name: Retrieving the temporary path for the OVA file - file: + ansible.builtin.file: path: "{{ target_directory }}/{{ ova_name }}.tmp" state: touch + mode: "0660" register: ova_file when: validate_only is not defined - diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-ova-external-data/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-ova-external-data/tasks/main.yml index ce7cd134aa8..6b8565aeb1c 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-ova-external-data/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-ova-external-data/tasks/main.yml @@ -4,7 +4,7 @@ # --- - name: Run query script - script: > + ansible.builtin.script: > 
get_ova_data.py "{{ ovirt_ova_path }}" args: diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-ova-extract/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-ova-extract/tasks/main.yml index c4938b22183..682d32a2fe8 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-ova-extract/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-ova-extract/tasks/main.yml @@ -1,18 +1,19 @@ --- - block: - name: Create temporary directory - tempfile: + ansible.builtin.tempfile: state: directory suffix: ova register: ova_temp - name: Copy extract_ova.py to temp directory - copy: + ansible.builtin.copy: src: extract_ova.py dest: "{{ ova_temp.path }}/extract_ova.py" + mode: "0755" - name: Run extraction script - command: > + ansible.builtin.command: > "{{ ansible_python_interpreter }}" "{{ ova_temp.path }}/extract_ova.py" "{{ ovirt_import_ova_path }}" @@ -23,11 +24,11 @@ register: extraction_result - name: Check OVA extraction process result - fail: + ansible.builtin.fail: msg: "Failed to extract OVA file" when: extraction_result.rc is defined and extraction_result.rc != 0 always: - - name: Remove temp directory - file: - state: absent - path: "{{ ova_temp.path }}" + - name: Remove temp directory + ansible.builtin.file: + state: absent + path: "{{ ova_temp.path }}" diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-ova-pack/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-ova-pack/tasks/main.yml index 047340b093a..d4a6905c3e0 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-ova-pack/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-ova-pack/tasks/main.yml @@ -1,18 +1,19 @@ --- - block: - name: Create temporary directory - tempfile: + ansible.builtin.tempfile: state: directory suffix: ova register: ova_temp - name: Copy pack_ova.py to temp directory - copy: + ansible.builtin.copy: 
src: pack_ova.py dest: "{{ ova_temp.path }}/pack_ova.py" + mode: "0755" - name: Run packing script - command: > + ansible.builtin.command: > "{{ ansible_python_interpreter }}" "{{ ova_temp.path }}/pack_ova.py" "{{ entity_type }}" @@ -23,12 +24,12 @@ "{{ ovirt_ova_pack_nvram }}" "{{ ovirt_ova_pack_padding }}" register: packing_result - ignore_errors: yes + ignore_errors: true async: "{{ ansible_timeout }}" poll: 15 when: ova_file is defined and ova_file.dest is defined always: - - name: Remove temp directory - file: - state: absent - path: "{{ ova_temp.path }}" + - name: Remove temp directory + ansible.builtin.file: + state: absent + path: "{{ ova_temp.path }}" diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-ova-query/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-ova-query/tasks/main.yml index a07ea18a36d..19fe63aa646 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-ova-query/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-ova-query/tasks/main.yml @@ -1,18 +1,19 @@ --- - block: - name: Create temporary directory - tempfile: + ansible.builtin.tempfile: state: directory suffix: ova register: ova_temp - name: Copy query_ova.py to temp directory - copy: + ansible.builtin.copy: src: query_ova.py dest: "{{ ova_temp.path }}/query_ova.py" + mode: "0755" - name: Run query script - command: > + ansible.builtin.command: > "{{ ansible_python_interpreter }}" "{{ ova_temp.path }}/query_ova.py" "{{ entity_type }}" @@ -22,7 +23,7 @@ poll: 15 register: extraction_result always: - - name: Remove temp directory - file: - state: absent - path: "{{ ova_temp.path }}" + - name: Remove temp directory + ansible.builtin.file: + state: absent + path: "{{ ova_temp.path }}" diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-provider-ovn-driver/defaults/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-provider-ovn-driver/defaults/main.yml 
index 1d05f3d802a..db30ba684c3 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-provider-ovn-driver/defaults/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-provider-ovn-driver/defaults/main.yml @@ -1,7 +1,7 @@ --- ovn_engine_cluster_version: "{{ host_deploy_cluster_version | default('4.2') }}" ovn_central: "{{ host_deploy_ovn_central | default(omit) }}" -ovn_tunneling_interface: "{{ host_deploy_ovn_tunneling_interface| default('ovirtmgmt') }}" +ovn_tunneling_interface: "{{ host_deploy_ovn_tunneling_interface | default('ovirtmgmt') }}" ovn_host_fqdn: "{{ ovirt_vds_hostname | default('') }}" cluster_switch: "{{ host_deploy_cluster_switch_type | default('legacy') }}" ovn_state: "configured" diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-provider-ovn-driver/tasks/configure.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-provider-ovn-driver/tasks/configure.yml index 8f15e317535..85e88b95717 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-provider-ovn-driver/tasks/configure.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-provider-ovn-driver/tasks/configure.yml @@ -1,49 +1,49 @@ --- - block: - name: Install ovs - package: + ansible.builtin.package: name: openvswitch state: present - name: Ensure Open vSwitch is started - service: + ansible.builtin.service: name: openvswitch state: started - enabled: yes + enabled: true when: - - cluster_switch == "ovs" or (ovn_central is defined and ovn_central | ipaddr) + - cluster_switch == "ovs" or (ovn_central is defined and ovn_central | ipaddr) - block: - - name: Install ovirt-provider-ovn-driver - package: - name: ovirt-provider-ovn-driver - state: present - - - name: Ensure ovn-controller is started - service: - name: ovn-controller - state: started - enabled: yes - - - name: Fetch installed packages - package_facts: - manager: rpm - - - name: Get installed version of vdsm - set_fact: 
- vdsm_package: "{{ ansible_facts.packages['vdsm'] | first }}" - - - name: Skip Host FQDN for vdsm older than 4.50 - set_fact: - ovn_host_fqdn: "" - when: - - vdsm_package.version is version('4.50', '<') - - - name: Configure OVN for oVirt - command: > - vdsm-tool ovn-config {{ ovn_central }} {{ ovn_tunneling_interface }} {{ ovn_host_fqdn }} + - name: Install ovirt-provider-ovn-driver + ansible.builtin.package: + name: ovirt-provider-ovn-driver + state: present + + - name: Ensure ovn-controller is started + ansible.builtin.service: + name: ovn-controller + state: started + enabled: true + + - name: Fetch installed packages + ansible.builtin.package_facts: + manager: rpm + + - name: Get installed version of vdsm + ansible.builtin.set_fact: + vdsm_package: "{{ ansible_facts.packages['vdsm'] | first }}" + + - name: Skip Host FQDN for vdsm older than 4.50 + ansible.builtin.set_fact: + ovn_host_fqdn: "" + when: + - vdsm_package.version is version('4.50', '<') + + - name: Configure OVN for oVirt + ansible.builtin.command: > + vdsm-tool ovn-config {{ ovn_central }} {{ ovn_tunneling_interface }} {{ ovn_host_fqdn }} when: - - ovn_central is defined - - ovn_central | ipaddr + - ovn_central is defined + - ovn_central | ipaddr diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-provider-ovn-driver/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-provider-ovn-driver/tasks/main.yml index e6080debb60..99d00615ea7 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-provider-ovn-driver/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-provider-ovn-driver/tasks/main.yml @@ -1,10 +1,10 @@ --- - name: Import the configure ovn-provider-ovn file - import_tasks: configure.yml + ansible.builtin.import_tasks: configure.yml when: - ovn_state == "configured" - name: Import the unconfigure ovn-provider-ovn file - import_tasks: unconfigure.yml + ansible.builtin.import_tasks: unconfigure.yml 
when: - ovn_state == "unconfigured" diff --git a/packaging/ansible-runner-service-project/project/roles/ovirt-provider-ovn-driver/tasks/unconfigure.yml b/packaging/ansible-runner-service-project/project/roles/ovirt-provider-ovn-driver/tasks/unconfigure.yml index f0b794f3342..1a4793c3de5 100644 --- a/packaging/ansible-runner-service-project/project/roles/ovirt-provider-ovn-driver/tasks/unconfigure.yml +++ b/packaging/ansible-runner-service-project/project/roles/ovirt-provider-ovn-driver/tasks/unconfigure.yml @@ -1,13 +1,11 @@ --- - name: Check if ovirt-provider-ovn-driver is installed - package: + ansible.builtin.package: name: ovirt-provider-ovn-driver state: present check_mode: true register: ovn_packages - name: Unconfigure the OVN chassis - command: vdsm-tool ovn-unconfigure + ansible.builtin.command: vdsm-tool ovn-unconfigure # noqa no-handler when: not ovn_packages.changed - tags: - - skip_ansible_lint # E503 diff --git a/packaging/ansible-runner-service-project/project/roles/python-ver-detect/tasks/main.yml b/packaging/ansible-runner-service-project/project/roles/python-ver-detect/tasks/main.yml index 65ff2464420..670687a3f65 100644 --- a/packaging/ansible-runner-service-project/project/roles/python-ver-detect/tasks/main.yml +++ b/packaging/ansible-runner-service-project/project/roles/python-ver-detect/tasks/main.yml @@ -1,10 +1,10 @@ --- - name: Run import yaml on py3 - command: python3 -c "import yaml" + ansible.builtin.command: python3 -c "import yaml" register: result - ignore_errors: yes + ignore_errors: true changed_when: true - name: Set facts - set_fact: + ansible.builtin.set_fact: ansible_python_interpreter: "{{ '/usr/bin/python3' if result.rc == 0 else '/usr/bin/python2' }}"