From 99a0fedcd0cc6fddab9a33dda1e6b7a818dd72a8 Mon Sep 17 00:00:00 2001 From: Mark Goddard Date: Sun, 14 Jan 2024 09:44:47 +0000 Subject: [PATCH 01/13] CI: Add Ansible lint job --- .github/workflows/stackhpc-pull-request.yml | 40 +++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/.github/workflows/stackhpc-pull-request.yml b/.github/workflows/stackhpc-pull-request.yml index aba6bacb7..6e0b5a56d 100644 --- a/.github/workflows/stackhpc-pull-request.yml +++ b/.github/workflows/stackhpc-pull-request.yml @@ -61,6 +61,46 @@ jobs: - name: Run Tox ${{ matrix.environment }} ๐Ÿงช run: tox -e ${{ matrix.environment }} + lint: + runs-on: ubuntu-22.04 + permissions: {} + strategy: + fail-fast: false + matrix: + include: + # NOTE(upgrade): Keep these in sync with Kayobe's supported Ansible and Python versions (see release notes). + - ansible: "2.12" + # ansible-lint 6+ is not supported on Python 3.8. + ansible-lint: "5" + python: "3.8" + - ansible: "2.13" + ansible-lint: "6" + python: "3.10" + name: Ansible ${{ matrix.ansible }} lint with Python ${{ matrix.python }} + if: github.repository == 'stackhpc/stackhpc-kayobe-config' + steps: + - name: GitHub Checkout ๐Ÿ›Ž + uses: actions/checkout@v3 + + - name: Setup Python ${{ matrix.python-version }} ๐Ÿ + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python }} + + - name: Install dependencies ๐Ÿ“ฆ + run: | + python -m pip install --upgrade pip + pip install ansible-core==${{ matrix.ansible }}.* ansible-lint==${{ matrix.ansible-lint }}.* -r requirements.txt + + - name: Install Ansible Galaxy collections and roles + run: | + ansible-galaxy collection install -r etc/kayobe/ansible/requirements.yml + ansible-galaxy role install -r etc/kayobe/ansible/requirements.yml + + - name: Linting code ๐Ÿงช + run: | + ansible-lint -v --force-color etc/kayobe/ansible/ + # A skipped job is treated as success when used as a required status check. # The registered required status checks refer to the name of the job in the # called reusable workflow rather than the jobs in this file. The following From 0198462d89fe50bb216abcd8d5145390bb4ce859 Mon Sep 17 00:00:00 2001 From: Mark Goddard Date: Tue, 16 Jan 2024 12:24:56 +0000 Subject: [PATCH 02/13] wazuh: Use relative path to roles This allows us to install the role anywhere. 
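A role referenced by a relative name is resolved through Ansible's role search path (the playbook's own roles/ directory plus any roles_path entries) rather than one hard-coded location, which is what makes the install location flexible. A minimal sketch of running the playbook against roles installed elsewhere; the install path below is an example only, not taken from this repository:

    # Install the Galaxy roles to any convenient directory (example path).
    ansible-galaxy role install -r etc/kayobe/ansible/requirements.yml -p /opt/ansible-roles

    # Point the role search path at that directory before running the playbook.
    export ANSIBLE_ROLES_PATH=/opt/ansible-roles
    ansible-playbook etc/kayobe/ansible/wazuh-manager.yml
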
--- etc/kayobe/ansible/wazuh-manager.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/etc/kayobe/ansible/wazuh-manager.yml b/etc/kayobe/ansible/wazuh-manager.yml index d4013ebd6..b75ed261a 100644 --- a/etc/kayobe/ansible/wazuh-manager.yml +++ b/etc/kayobe/ansible/wazuh-manager.yml @@ -35,7 +35,7 @@ # Certificates generation - hosts: localhost roles: - - role: "{{ playbook_dir }}/roles/wazuh-ansible/wazuh-ansible/roles/wazuh/wazuh-indexer" + - role: "wazuh-ansible/wazuh-ansible/roles/wazuh/wazuh-indexer" perform_installation: false become: no tags: @@ -45,10 +45,10 @@ become: yes become_user: root roles: - - role: "{{ playbook_dir }}/roles/wazuh-ansible/wazuh-ansible/roles/wazuh/wazuh-indexer" - - role: "{{ playbook_dir }}/roles/wazuh-ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager" - - role: "{{ playbook_dir }}/roles/wazuh-ansible/wazuh-ansible/roles/wazuh/ansible-filebeat-oss" - - role: "{{ playbook_dir }}/roles/wazuh-ansible/wazuh-ansible/roles/wazuh/wazuh-dashboard" + - role: "wazuh-ansible/wazuh-ansible/roles/wazuh/wazuh-indexer" + - role: "wazuh-ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager" + - role: "wazuh-ansible/wazuh-ansible/roles/wazuh/ansible-filebeat-oss" + - role: "wazuh-ansible/wazuh-ansible/roles/wazuh/wazuh-dashboard" post_tasks: - block: - name: Check if custom SCA policies directory exists From 235b1c3154179329298fac8e42b8703f55c7c5ac Mon Sep 17 00:00:00 2001 From: Mark Goddard Date: Tue, 16 Jan 2024 14:13:04 +0000 Subject: [PATCH 03/13] ansible-lint: Fix include_role parameter name --- etc/kayobe/ansible/deploy-github-runner.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/etc/kayobe/ansible/deploy-github-runner.yml b/etc/kayobe/ansible/deploy-github-runner.yml index 47c8211a1..ae6693438 100644 --- a/etc/kayobe/ansible/deploy-github-runner.yml +++ b/etc/kayobe/ansible/deploy-github-runner.yml @@ -8,7 +8,7 @@ tasks: - name: Deploy runners ansible.builtin.include_role: - role: monolithprojects.github_actions_runner + name: monolithprojects.github_actions_runner vars: runner_name: "{{ ansible_facts.nodename }}-{{ runner.key }}" runner_dir: "{{ base_runner_dir }}/{{ runner.key }}" From 0be7a56724408bec1af08ebf6c1b60a794183cb6 Mon Sep 17 00:00:00 2001 From: Alex-Welsh Date: Wed, 6 Nov 2024 16:40:48 +0000 Subject: [PATCH 04/13] Add ansible-lint CI This commit includes a properly configured ansible-lint CI job and a large amount of changes to the existing playbooks so that the new CI passes. Some fixes were applied automatically with the --fix argument, many were made manually. There is some risk that the changes have altered the behaviour of the playbooks. 
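The automatic portion of those fixes can be reproduced locally with the linter itself; a minimal sketch, assuming the dependencies are installed roughly as in the CI job below (pinned versions omitted, and --fix only rewrites the violations ansible-lint knows how to auto-correct):

    # Install the linter and the collections/roles the playbooks depend on.
    pip install ansible-core ansible-lint -r requirements.txt
    ansible-galaxy collection install -r etc/kayobe/ansible/requirements.yml
    ansible-galaxy role install -r etc/kayobe/ansible/requirements.yml

    # Report the remaining violations, then apply the auto-correctable ones in place.
    ansible-lint -v etc/kayobe/ansible/
    ansible-lint --fix etc/kayobe/ansible/
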
--- .ansible-lint-ignore | 7 ++ .github/workflows/stackhpc-pull-request.yml | 13 ++-- etc/kayobe/ansible/advise-run.yml | 24 +++--- etc/kayobe/ansible/build-ofed-rocky.yml | 13 ++-- etc/kayobe/ansible/cephadm-commands-post.yml | 3 +- etc/kayobe/ansible/cephadm-commands-pre.yml | 3 +- etc/kayobe/ansible/cephadm-crush-rules.yml | 3 +- etc/kayobe/ansible/cephadm-deploy.yml | 3 +- etc/kayobe/ansible/cephadm-ec-profiles.yml | 3 +- etc/kayobe/ansible/cephadm-gather-keys.yml | 10 +-- etc/kayobe/ansible/cephadm-keys.yml | 3 +- etc/kayobe/ansible/cephadm-pools.yml | 3 +- etc/kayobe/ansible/cephadm.yml | 21 +++-- etc/kayobe/ansible/check-tags.yml | 7 +- etc/kayobe/ansible/cis.yml | 13 ++-- .../ansible/configure-aio-resources.yml | 38 ++++----- etc/kayobe/ansible/configure-vxlan.yml | 2 +- etc/kayobe/ansible/deploy-github-runner.yml | 8 +- .../ansible/deploy-os-capacity-exporter.yml | 20 +++-- etc/kayobe/ansible/diagnostics.yml | 5 +- etc/kayobe/ansible/docker-registry-login.yml | 4 +- etc/kayobe/ansible/firewalld-watchdog.yml | 5 +- etc/kayobe/ansible/fix-grub-rl9.yml | 6 +- etc/kayobe/ansible/fix-hostname.yml | 2 +- etc/kayobe/ansible/fix-houston.yml | 19 +++-- etc/kayobe/ansible/fix-networking.yml | 4 +- etc/kayobe/ansible/growroot.yml | 35 +++++---- etc/kayobe/ansible/hotfix-containers.yml | 23 +++--- .../ansible/install-pre-commit-hooks.yml | 2 +- etc/kayobe/ansible/nova-compute-disable.yml | 12 +-- etc/kayobe/ansible/nova-compute-drain.yml | 32 ++++---- etc/kayobe/ansible/nova-compute-enable.yml | 12 +-- .../ansible/octavia-amphora-image-build.yml | 40 +++++----- .../octavia-amphora-image-register.yml | 20 ++--- .../ansible/openstack-host-image-upload.yml | 21 ++--- .../ansible/ovn-fix-chassis-priorities.yml | 2 +- .../ansible/prometheus-network-names.yml | 8 +- etc/kayobe/ansible/pulp-auth-proxy.yml | 3 +- etc/kayobe/ansible/pulp-container-publish.yml | 5 +- etc/kayobe/ansible/pulp-container-sync.yml | 5 +- .../ansible/pulp-host-image-download.yml | 9 +-- .../ansible/pulp-host-image-promote.yml | 11 ++- etc/kayobe/ansible/pulp-host-image-upload.yml | 45 ++++++----- .../ansible/pulp-repo-promote-production.yml | 5 +- etc/kayobe/ansible/pulp-repo-publish.yml | 8 +- etc/kayobe/ansible/pulp-repo-sync.yml | 5 +- .../ansible/purge-command-not-found.yml | 4 +- etc/kayobe/ansible/push-ofed.yml | 2 +- etc/kayobe/ansible/rabbitmq-reset.yml | 43 +++++----- etc/kayobe/ansible/reboot.yml | 16 ++-- etc/kayobe/ansible/rekey-hosts.yml | 21 ++--- etc/kayobe/ansible/reset-bls-entries.yml | 12 +-- .../roles/pulp_auth_proxy/tasks/main.yml | 11 +-- etc/kayobe/ansible/rsyslog.yml | 42 +++++----- etc/kayobe/ansible/run-container-hotfix.yml | 17 ++-- etc/kayobe/ansible/smartmon-tools.yml | 36 +++++---- .../ansible/stackhpc-openstack-tests.yml | 15 ++-- .../ansible/stop-openstack-services.yml | 37 ++++----- etc/kayobe/ansible/ubuntu-upgrade.yml | 52 ++++++------- etc/kayobe/ansible/vault-deploy-barbican.yml | 29 +++---- etc/kayobe/ansible/vault-deploy-overcloud.yml | 56 ++++++------- etc/kayobe/ansible/vault-deploy-seed.yml | 36 +++++---- .../ansible/vault-generate-backend-tls.yml | 34 ++++---- .../ansible/vault-generate-internal-tls.yml | 20 ++--- .../vault-generate-test-external-tls.yml | 20 ++--- etc/kayobe/ansible/vault-unseal-overcloud.yml | 16 ++-- etc/kayobe/ansible/vault-unseal-seed.yml | 15 ++-- etc/kayobe/ansible/wazuh-agent.yml | 21 ++--- etc/kayobe/ansible/wazuh-manager.yml | 78 ++++++++++--------- etc/kayobe/ansible/wazuh-secrets.yml | 15 ++-- 70 files changed, 625 insertions(+), 568 deletions(-) create 
mode 100644 .ansible-lint-ignore diff --git a/.ansible-lint-ignore b/.ansible-lint-ignore new file mode 100644 index 000000000..3891e8f9a --- /dev/null +++ b/.ansible-lint-ignore @@ -0,0 +1,7 @@ +# This file contains ignores rule violations for ansible-lint +etc/kayobe/ansible/vault-deploy-barbican.yml fqcn[action-core] +etc/kayobe/ansible/vault-generate-backend-tls.yml fqcn[action-core] +etc/kayobe/ansible/vault-generate-internal-tls.yml fqcn[action-core] +etc/kayobe/ansible/vault-generate-test-external-tls.yml fqcn[action-core] +etc/kayobe/ansible/rabbitmq-reset.yml command-instead-of-module +etc/kayobe/ansible/ubuntu-upgrade.yml syntax-check[missing-file] diff --git a/.github/workflows/stackhpc-pull-request.yml b/.github/workflows/stackhpc-pull-request.yml index 6e0b5a56d..0af02d5ca 100644 --- a/.github/workflows/stackhpc-pull-request.yml +++ b/.github/workflows/stackhpc-pull-request.yml @@ -69,12 +69,9 @@ jobs: matrix: include: # NOTE(upgrade): Keep these in sync with Kayobe's supported Ansible and Python versions (see release notes). - - ansible: "2.12" - # ansible-lint 6+ is not supported on Python 3.8. - ansible-lint: "5" - python: "3.8" - - ansible: "2.13" - ansible-lint: "6" + - ansible: "2.16" + python: "3.12" + - ansible: "2.15" python: "3.10" name: Ansible ${{ matrix.ansible }} lint with Python ${{ matrix.python }} if: github.repository == 'stackhpc/stackhpc-kayobe-config' @@ -90,7 +87,7 @@ jobs: - name: Install dependencies ๐Ÿ“ฆ run: | python -m pip install --upgrade pip - pip install ansible-core==${{ matrix.ansible }}.* ansible-lint==${{ matrix.ansible-lint }}.* -r requirements.txt + pip install ansible-core==${{ matrix.ansible }}.* ansible-lint -r requirements.txt - name: Install Ansible Galaxy collections and roles run: | @@ -99,7 +96,7 @@ jobs: - name: Linting code ๐Ÿงช run: | - ansible-lint -v --force-color etc/kayobe/ansible/ + ansible-lint -v --force-color -x no-changed-when,risky-file-permissions,run-once,name[template],package-latest,yaml,role-name[path] etc/kayobe/ansible/. # A skipped job is treated as success when used as a required status check. 
# The registered required status checks refer to the name of the job in the diff --git a/etc/kayobe/ansible/advise-run.yml b/etc/kayobe/ansible/advise-run.yml index 3def59034..77c8e42d3 100644 --- a/etc/kayobe/ansible/advise-run.yml +++ b/etc/kayobe/ansible/advise-run.yml @@ -1,40 +1,40 @@ --- - name: ADVise run hosts: localhost - gather_facts: no + gather_facts: false tags: - advise vars: - venv: "~/venvs/advise-review" + venv: ~/venvs/advise-review input_dir: "{{ lookup('env', 'PWD') }}/overcloud-introspection-data" output_dir: "{{ lookup('env', 'PWD') }}/review" - advise_pattern: ".*.eval" # Uses regex + advise_pattern: .*.eval # Uses regex tasks: - name: Install dependencies - pip: + ansible.builtin.pip: virtualenv: "{{ venv }}" name: - git+https://github.com/stackhpc/ADVise state: latest - virtualenv_command: "python3 -m venv" + virtualenv_command: python3 -m venv - name: Create data directory - file: - path: '{{ output_dir }}/data' + ansible.builtin.file: + path: "{{ output_dir }}/data" state: directory - name: Extract data - shell: + ansible.builtin.shell: cmd: > {{ venv }}/bin/m2-extract {{ input_dir }}/*.json --output_dir {{ output_dir }}/data - name: Create review directory - file: - path: '{{ output_dir }}/results' + ansible.builtin.file: + path: "{{ output_dir }}/results" state: directory - name: Process data - shell: + ansible.builtin.shell: cmd: > {{ venv }}/bin/advise-process -I ipmi @@ -42,6 +42,6 @@ -o '{{ output_dir }}' - name: Visualise data - command: > + ansible.builtin.command: > {{ venv }}/bin/advise-visualise --output_dir '{{ output_dir }}' diff --git a/etc/kayobe/ansible/build-ofed-rocky.yml b/etc/kayobe/ansible/build-ofed-rocky.yml index 4c5b74bba..6263d0ba6 100644 --- a/etc/kayobe/ansible/build-ofed-rocky.yml +++ b/etc/kayobe/ansible/build-ofed-rocky.yml @@ -6,8 +6,8 @@ tasks: - name: Check whether noexec is enabled for /var/tmp ansible.builtin.lineinfile: - path: "/etc/fstab" - regexp: "noexec" + path: /etc/fstab + regexp: noexec state: absent changed_when: false check_mode: true @@ -42,7 +42,8 @@ - name: Add DOCA host repository package ansible.builtin.dnf: - name: https://developer.nvidia.com/downloads/networking/secure/doca-sdk/DOCA_2.8/doca-host-2.8.0-204000_{{ stackhpc_pulp_doca_ofed_version }}_rhel9{{ stackhpc_pulp_repo_rocky_9_minor_version }}.x86_64.rpm + name: "https://developer.nvidia.com/downloads/networking/secure/doca-sdk/DOCA_2.8/doca-host-2.8.0-204000_\ + {{ stackhpc_pulp_doca_ofed_version }}_rhel9{{ stackhpc_pulp_repo_rocky_9_minor_version }}.x86_64.rpm" disable_gpg_check: true - name: Install DOCA extra packages @@ -53,13 +54,13 @@ ansible.builtin.file: path: /home/cloud-user/ofed state: directory - mode: 0777 + mode: "0777" - name: Set build directory ansible.builtin.replace: path: /opt/mellanox/doca/tools/doca-kernel-support - regexp: 'TMP_DIR=\$1' - replace: 'TMP_DIR=/home/cloud-user/ofed' + regexp: TMP_DIR=\$1 + replace: TMP_DIR=/home/cloud-user/ofed - name: Build OFED kernel modules ansible.builtin.shell: diff --git a/etc/kayobe/ansible/cephadm-commands-post.yml b/etc/kayobe/ansible/cephadm-commands-post.yml index d60a135a3..c53c4d0ce 100644 --- a/etc/kayobe/ansible/cephadm-commands-post.yml +++ b/etc/kayobe/ansible/cephadm-commands-post.yml @@ -7,7 +7,8 @@ - cephadm - cephadm-commands tasks: - - import_role: + - name: Apply Cephadm role + ansible.builtin.import_role: name: stackhpc.cephadm.commands vars: cephadm_commands: "{{ cephadm_commands_post | default([]) }}" diff --git a/etc/kayobe/ansible/cephadm-commands-pre.yml 
b/etc/kayobe/ansible/cephadm-commands-pre.yml index abe58caa5..b59cf5162 100644 --- a/etc/kayobe/ansible/cephadm-commands-pre.yml +++ b/etc/kayobe/ansible/cephadm-commands-pre.yml @@ -7,7 +7,8 @@ - cephadm - cephadm-commands tasks: - - import_role: + - name: Apply Cephadm role + ansible.builtin.import_role: name: stackhpc.cephadm.commands vars: cephadm_commands: "{{ cephadm_commands_pre | default([]) }}" diff --git a/etc/kayobe/ansible/cephadm-crush-rules.yml b/etc/kayobe/ansible/cephadm-crush-rules.yml index b95b9f960..7bc9c9fbe 100644 --- a/etc/kayobe/ansible/cephadm-crush-rules.yml +++ b/etc/kayobe/ansible/cephadm-crush-rules.yml @@ -7,5 +7,6 @@ - cephadm - cephadm-crush-rules tasks: - - import_role: + - name: Apply Cephadm crush rule role + ansible.builtin.import_role: name: stackhpc.cephadm.crush_rules diff --git a/etc/kayobe/ansible/cephadm-deploy.yml b/etc/kayobe/ansible/cephadm-deploy.yml index 9cb0f712b..471d6037d 100644 --- a/etc/kayobe/ansible/cephadm-deploy.yml +++ b/etc/kayobe/ansible/cephadm-deploy.yml @@ -7,5 +7,6 @@ - cephadm - cephadm-deploy tasks: - - import_role: + - name: Apply Cephadm role + ansible.builtin.import_role: name: stackhpc.cephadm.cephadm diff --git a/etc/kayobe/ansible/cephadm-ec-profiles.yml b/etc/kayobe/ansible/cephadm-ec-profiles.yml index 5fb0f3bdc..9ba7f90bb 100644 --- a/etc/kayobe/ansible/cephadm-ec-profiles.yml +++ b/etc/kayobe/ansible/cephadm-ec-profiles.yml @@ -7,5 +7,6 @@ - cephadm - cephadm-ec-profiles tasks: - - import_role: + - name: Apply Cephadm EC profiles role + ansible.builtin.import_role: name: stackhpc.cephadm.ec_profiles diff --git a/etc/kayobe/ansible/cephadm-gather-keys.yml b/etc/kayobe/ansible/cephadm-gather-keys.yml index d2acf92d0..e4b8da206 100644 --- a/etc/kayobe/ansible/cephadm-gather-keys.yml +++ b/etc/kayobe/ansible/cephadm-gather-keys.yml @@ -32,13 +32,13 @@ loop: "{{ kolla_ceph_services | selectattr('required') | map(attribute='keys') | flatten | unique }}" - name: Generate ceph.conf - command: "cephadm shell -- ceph config generate-minimal-conf" + ansible.builtin.command: cephadm shell -- ceph config generate-minimal-conf become: true register: cephadm_ceph_conf changed_when: false - name: Ensure Kolla config directories are present - file: + ansible.builtin.file: state: directory path: "{{ kayobe_env_config_path }}/kolla/config/{{ kolla_service_to_key_dir[item.name] }}" loop: "{{ kolla_ceph_services | selectattr('required') }}" @@ -51,7 +51,7 @@ key_info: "{{ cephadm_key_info.results | selectattr('item', 'equalto', item.1) | first }}" cephadm_key: "{{ key_info.stdout }}" cephadm_user: "{{ item.1 }}" - copy: + ansible.builtin.copy: # Include a trailing newline. content: | {{ cephadm_key }} @@ -63,7 +63,7 @@ notify: Please add and commit the Kayobe configuration - name: Save ceph.conf to Kayobe configuration - copy: + ansible.builtin.copy: # Include a trailing newline. # Kolla Ansible's merge_configs module does not like the leading tabs in ceph.conf. content: | @@ -77,7 +77,7 @@ handlers: - name: Please add and commit the Kayobe configuration - debug: + ansible.builtin.debug: msg: >- Please add and commit the Ceph configuration files and keys in Kayobe configuration. Remember to encrypt the keys using Ansible Vault. 
diff --git a/etc/kayobe/ansible/cephadm-keys.yml b/etc/kayobe/ansible/cephadm-keys.yml index 6f632f4ca..6b2604519 100644 --- a/etc/kayobe/ansible/cephadm-keys.yml +++ b/etc/kayobe/ansible/cephadm-keys.yml @@ -7,5 +7,6 @@ - cephadm - cephadm-keys tasks: - - import_role: + - name: Apply Cephadm keys role + ansible.builtin.import_role: name: stackhpc.cephadm.keys diff --git a/etc/kayobe/ansible/cephadm-pools.yml b/etc/kayobe/ansible/cephadm-pools.yml index 938e6de19..bb61f9efb 100644 --- a/etc/kayobe/ansible/cephadm-pools.yml +++ b/etc/kayobe/ansible/cephadm-pools.yml @@ -7,5 +7,6 @@ - cephadm - cephadm-keys tasks: - - import_role: + - name: Apply Cephadm pools role + ansible.builtin.import_role: name: stackhpc.cephadm.pools diff --git a/etc/kayobe/ansible/cephadm.yml b/etc/kayobe/ansible/cephadm.yml index b699df153..8af7ea022 100644 --- a/etc/kayobe/ansible/cephadm.yml +++ b/etc/kayobe/ansible/cephadm.yml @@ -1,9 +1,16 @@ --- # Deploy Ceph via Cephadm. Create EC profiles, CRUSH rules, pools and keys. -- import_playbook: cephadm-deploy.yml -- import_playbook: cephadm-commands-pre.yml -- import_playbook: cephadm-ec-profiles.yml -- import_playbook: cephadm-crush-rules.yml -- import_playbook: cephadm-pools.yml -- import_playbook: cephadm-keys.yml -- import_playbook: cephadm-commands-post.yml +- name: Import Cephadm deploy playbook + import_playbook: cephadm-deploy.yml +- name: Import Cephadm commands pre playbook + import_playbook: cephadm-commands-pre.yml +- name: Import Cephadm ec profiles playbook + import_playbook: cephadm-ec-profiles.yml +- name: Import Cephadm crush rules playbook + import_playbook: cephadm-crush-rules.yml +- name: Import Cephadm pools playbook + import_playbook: cephadm-pools.yml +- name: Import Cephadm keys playbook + import_playbook: cephadm-keys.yml +- name: Import Cephadm commands post playbook + import_playbook: cephadm-commands-post.yml diff --git a/etc/kayobe/ansible/check-tags.yml b/etc/kayobe/ansible/check-tags.yml index bdfb294da..f01d0b5f1 100644 --- a/etc/kayobe/ansible/check-tags.yml +++ b/etc/kayobe/ansible/check-tags.yml @@ -7,18 +7,19 @@ gather_facts: false tasks: - name: Query images and tags - command: + ansible.builtin.command: cmd: >- {{ kayobe_config_path }}/../../tools/kolla-images.py list-tags register: kolla_images_result changed_when: false - name: Set a fact about images and tags - set_fact: + ansible.builtin.set_fact: kolla_images: "{{ kolla_images_result.stdout | from_yaml }}" # Use state=read and allow_missing=false to check for missing tags in test pulp. - - import_role: + - name: Check for missing tags + ansible.builtin.import_role: name: stackhpc.pulp.pulp_container_content vars: pulp_container_content: >- diff --git a/etc/kayobe/ansible/cis.yml b/etc/kayobe/ansible/cis.yml index 7f41ad9fa..faa6ab586 100644 --- a/etc/kayobe/ansible/cis.yml +++ b/etc/kayobe/ansible/cis.yml @@ -1,5 +1,4 @@ --- - - name: Security hardening hosts: cis-hardening become: true @@ -9,14 +8,14 @@ # TODO: Remove this when Red Hat FIPS policy has been updated to allow ed25519 keys. 
# https://gitlab.com/gitlab-org/gitlab/-/issues/367429#note_1840422075 - name: Assert that we are using a supported SSH key - assert: + ansible.builtin.assert: that: - ssh_key_type != 'ed25519' fail_msg: FIPS policy does not currently support ed25519 SSH keys on RHEL family systems when: ansible_facts.os_family == 'RedHat' - name: Ensure the cron package is installed on ubuntu - package: + ansible.builtin.package: name: cron state: present when: ansible_facts.distribution == 'Ubuntu' @@ -25,17 +24,19 @@ # This is to workaround an issue where we set the expiry to 365 days on kayobe # service accounts in a previous iteration of the CIS benchmark hardening # defaults. This should restore the defaults and can eventually be removed. - command: chage -m 0 -M 99999 -W 7 -I -1 {{ item }} + ansible.builtin.command: chage -m 0 -M 99999 -W 7 -I -1 {{ item }} become: true changed_when: false with_items: - "{{ kayobe_ansible_user }}" - "{{ kolla_ansible_user }}" - - include_role: + - name: Run CIS hardening role (RHEL 9) + ansible.builtin.include_role: name: ansible-lockdown.rhel9_cis when: ansible_facts.os_family == 'RedHat' and ansible_facts.distribution_major_version == '9' - - include_role: + - name: Run CIS hardening role (Ubuntu 22) + ansible.builtin.include_role: name: ansible-lockdown.ubuntu22_cis when: ansible_facts.distribution == 'Ubuntu' and ansible_facts.distribution_major_version == '22' diff --git a/etc/kayobe/ansible/configure-aio-resources.yml b/etc/kayobe/ansible/configure-aio-resources.yml index 48263c641..0aebc014f 100644 --- a/etc/kayobe/ansible/configure-aio-resources.yml +++ b/etc/kayobe/ansible/configure-aio-resources.yml @@ -1,13 +1,12 @@ --- - - name: Ensure dependencies are installed hosts: controllers[0] gather_facts: true vars: - venv: '{{ virtualenv_path }}/openstack' + venv: "{{ virtualenv_path }}/openstack" tasks: - name: Install python build dependencies - package: + ansible.builtin.package: name: "{{ packages | select | list }}" cache_valid_time: "{{ apt_cache_valid_time if ansible_facts.os_family == 'Debian' else omit }}" update_cache: "{{ True if ansible_facts.os_family == 'Debian' else omit }}" @@ -16,56 +15,53 @@ vars: packages: - "{% if ansible_facts.os_family == 'RedHat' %}gcc{% else %}build-essential{% endif %}" - - "python3-dev{% if ansible_facts.os_family == 'RedHat' %}el{% endif %}" + - python3-dev{% if ansible_facts.os_family == 'RedHat' %}el{% endif %} - "{% if ansible_facts.os_family == 'Debian' %}python3-venv{% endif %}" - name: Ensure latest version of pip is installed - pip: + ansible.builtin.pip: name: pip state: latest - virtualenv: '{{ venv }}' - virtualenv_command: "/usr/bin/python3 -m venv" + virtualenv: "{{ venv }}" + virtualenv_command: /usr/bin/python3 -m venv - name: Ensure python openstack client is installed - pip: + ansible.builtin.pip: name: python-openstackclient - virtualenv: '{{ venv }}' + virtualenv: "{{ venv }}" extra_args: -c "{{ pip_upper_constraints_file }}" - name: Include kolla secrets - include_vars: - dir: '{{ kayobe_env_config_path }}/kolla/' + ansible.builtin.include_vars: + dir: "{{ kayobe_env_config_path }}/kolla/" files_matching: passwords.yml name: kolla_passwords - name: Add an IP to connect to the instances # FIXME: host configure will have bounced the bridge # and removed the IP - command: ip a add 10.0.2.1/24 dev breth1 + ansible.builtin.command: ip a add 10.0.2.1/24 dev breth1 register: result - failed_when: 'result.rc !=0 and "RTNETLINK answers: File exists" not in - result.stderr' + failed_when: 'result.rc != 0 and 
"RTNETLINK answers: File exists" not in result.stderr' changed_when: result.rc == 0 become: true - name: Run init-run-once - script: + ansible.builtin.script: cmd: scripts/aio-init.sh creates: /tmp/.init-runonce environment: - KOLLA_OPENSTACK_COMMAND: '{{ venv }}/bin/openstack' + KOLLA_OPENSTACK_COMMAND: "{{ venv }}/bin/openstack" OS_PROJECT_DOMAIN_NAME: Default OS_USER_DOMAIN_NAME: Default OS_PROJECT_NAME: admin OS_TENANT_NAME: admin OS_USERNAME: admin - OS_PASSWORD: "{{ kolla_passwords.keystone_admin_password | mandatory('Could\ - \ not find keystone_admin_password in passwords.yml') }}" + OS_PASSWORD: "{{ kolla_passwords.keystone_admin_password | mandatory('Could not find keystone_admin_password in passwords.yml') }}" # Use kolla_external_fqdn in wallaby - OS_AUTH_URL: http://{{ kolla_external_fqdn | default(public_net_name - | net_fqdn) | default(public_net_name | net_vip_address, true) }}:5000 + OS_AUTH_URL: http://{{ kolla_external_fqdn | default(public_net_name | net_fqdn) | default(public_net_name | net_vip_address, true) }}:5000 OS_INTERFACE: public OS_ENDPOINT_TYPE: publicURL - OS_IDENTITY_API_VERSION: 3 + OS_IDENTITY_API_VERSION: "3" OS_REGION_NAME: RegionOne OS_AUTH_PLUGIN: password diff --git a/etc/kayobe/ansible/configure-vxlan.yml b/etc/kayobe/ansible/configure-vxlan.yml index 3abd3784e..0101b1ce1 100644 --- a/etc/kayobe/ansible/configure-vxlan.yml +++ b/etc/kayobe/ansible/configure-vxlan.yml @@ -8,6 +8,6 @@ # python interpreter. ansible_python_interpreter: /usr/bin/python3 # Work around no known_hosts entry on first boot. - ansible_ssh_common_args: "-o StrictHostKeyChecking=no" + ansible_ssh_common_args: -o StrictHostKeyChecking=no roles: - role: stackhpc.vxlan diff --git a/etc/kayobe/ansible/deploy-github-runner.yml b/etc/kayobe/ansible/deploy-github-runner.yml index ae6693438..8f69d8345 100644 --- a/etc/kayobe/ansible/deploy-github-runner.yml +++ b/etc/kayobe/ansible/deploy-github-runner.yml @@ -1,7 +1,7 @@ --- - name: Deploy GitHub Runner hosts: github-runners - become: yes + become: true roles: - role: geerlingguy.pip - role: geerlingguy.docker @@ -14,8 +14,7 @@ runner_dir: "{{ base_runner_dir }}/{{ runner.key }}" runner_labels: "{{ runner.value.labels | default(default_runner_labels) }}" runner_state: "{{ runner.value.state | default('started') }}" - with_dict: - "{{ github_runners }}" + with_dict: "{{ github_runners }}" loop_control: loop_var: runner @@ -28,7 +27,6 @@ enabled: true become: true when: runner_state | default('started') == 'started' - with_dict: - "{{ github_runners }}" + with_dict: "{{ github_runners }}" loop_control: loop_var: runner diff --git a/etc/kayobe/ansible/deploy-os-capacity-exporter.yml b/etc/kayobe/ansible/deploy-os-capacity-exporter.yml index 41d91bfbd..e60b1bba1 100644 --- a/etc/kayobe/ansible/deploy-os-capacity-exporter.yml +++ b/etc/kayobe/ansible/deploy-os-capacity-exporter.yml @@ -23,7 +23,7 @@ - name: Read admin-openrc credential file ansible.builtin.command: - cmd: "cat {{ lookup('ansible.builtin.env', 'KOLLA_CONFIG_PATH') }}/admin-openrc.sh" + cmd: cat {{ lookup('ansible.builtin.env', 'KOLLA_CONFIG_PATH') }}/admin-openrc.sh delegate_to: localhost register: credential when: stackhpc_enable_os_capacity @@ -31,12 +31,18 @@ - name: Set facts for admin credentials ansible.builtin.set_fact: - stackhpc_os_capacity_auth_url: "{{ credential.stdout_lines | select('match', '.*OS_AUTH_URL*.') | first | split('=') | last | replace(\"'\",'') }}" - stackhpc_os_capacity_project_name: "{{ credential.stdout_lines | select('match', 
'.*OS_PROJECT_NAME*.') | first | split('=') | last | replace(\"'\",'') }}" - stackhpc_os_capacity_domain_name: "{{ credential.stdout_lines | select('match', '.*OS_PROJECT_DOMAIN_NAME*.') | first | split('=') | last | replace(\"'\",'') }}" - stackhpc_os_capacity_openstack_region_name: "{{ credential.stdout_lines | select('match', '.*OS_REGION_NAME*.') | first | split('=') | last | replace(\"'\",'') }}" - stackhpc_os_capacity_username: "{{ credential.stdout_lines | select('match', '.*OS_USERNAME*.') | first | split('=') | last | replace(\"'\",'') }}" - stackhpc_os_capacity_password: "{{ credential.stdout_lines | select('match', '.*OS_PASSWORD*.') | first | split('=') | last | replace(\"'\",'') }}" + stackhpc_os_capacity_auth_url: "{{ credential.stdout_lines | select('match', '.*OS_AUTH_URL*.') \ + | first | split('=') | last | replace(\"'\", '') }}" + stackhpc_os_capacity_project_name: "{{ credential.stdout_lines | select('match', '.*OS_PROJECT_NAME*.') \ + | first | split('=') | last | replace(\"'\", '') }}" + stackhpc_os_capacity_domain_name: "{{ credential.stdout_lines | select('match', '.*OS_PROJECT_DOMAIN_NAME*.') \ + | first | split('=') | last | replace(\"'\", '') }}" + stackhpc_os_capacity_openstack_region_name: "{{ credential.stdout_lines | select('match', '.*OS_REGION_NAME*.') \ + | first | split('=') | last | replace(\"'\", '') }}" + stackhpc_os_capacity_username: "{{ credential.stdout_lines | select('match', '.*OS_USERNAME*.') \ + | first | split('=') | last | replace(\"'\", '') }}" + stackhpc_os_capacity_password: "{{ credential.stdout_lines | select('match', '.*OS_PASSWORD*.') \ + | first | split('=') | last | replace(\"'\", '') }}" when: stackhpc_enable_os_capacity - name: Template clouds.yml diff --git a/etc/kayobe/ansible/diagnostics.yml b/etc/kayobe/ansible/diagnostics.yml index 7764698cb..edb5800b6 100644 --- a/etc/kayobe/ansible/diagnostics.yml +++ b/etc/kayobe/ansible/diagnostics.yml @@ -11,7 +11,8 @@ vars: diagnostics_path_local: "{{ lookup('env', 'PWD') }}/diagnostics" tasks: - - block: + - name: Run diagnostics + block: - name: Create a temporary directory for diagnostics ansible.builtin.tempfile: state: directory @@ -37,7 +38,7 @@ src: "{{ diagnostics_tmpdir.path }}/" dest: "{{ diagnostics_path_local }}/{{ inventory_hostname }}" mode: pull - archive: no + archive: false recursive: true copy_links: true verify_host: true diff --git a/etc/kayobe/ansible/docker-registry-login.yml b/etc/kayobe/ansible/docker-registry-login.yml index 39ad03600..5db2ed826 100644 --- a/etc/kayobe/ansible/docker-registry-login.yml +++ b/etc/kayobe/ansible/docker-registry-login.yml @@ -4,8 +4,8 @@ hosts: container-image-builders tasks: - name: Login to docker registry - docker_login: + community.docker.docker_login: registry_url: "{{ kolla_docker_registry or omit }}" username: "{{ kolla_docker_registry_username }}" password: "{{ kolla_docker_registry_password }}" - reauthorize: yes + reauthorize: true diff --git a/etc/kayobe/ansible/firewalld-watchdog.yml b/etc/kayobe/ansible/firewalld-watchdog.yml index 874992df7..c74edd358 100644 --- a/etc/kayobe/ansible/firewalld-watchdog.yml +++ b/etc/kayobe/ansible/firewalld-watchdog.yml @@ -23,7 +23,8 @@ firewalld_watchdog_timeout_s: 600 become: true tasks: - - when: firewalld_watchdog_state == 'present' + - name: Ensure watchdog is present + when: firewalld_watchdog_state == 'present' block: - name: Create firewalld-watchdog service unit file ansible.builtin.copy: @@ -61,7 +62,7 @@ - name: Remove firewalld-watchdog unit files ansible.builtin.file: - 
path: "/etc/systemd/system/{{ item }}" + path: /etc/systemd/system/{{ item }} state: absent loop: - firewalld-watchdog.service diff --git a/etc/kayobe/ansible/fix-grub-rl9.yml b/etc/kayobe/ansible/fix-grub-rl9.yml index c81402fe6..c3f3cabf8 100644 --- a/etc/kayobe/ansible/fix-grub-rl9.yml +++ b/etc/kayobe/ansible/fix-grub-rl9.yml @@ -1,15 +1,15 @@ --- - name: Remove "--root-dev-only" from grub.cfg if OS is Rocky Linux 9 hosts: overcloud - become: yes + become: true gather_facts: true tasks: - name: Remove "--root-dev-only" from /boot/efi/EFI/rocky/grub.cfg ansible.builtin.replace: path: /boot/efi/EFI/rocky/grub.cfg - regexp: '--root-dev-only\s?' - replace: '' + regexp: --root-dev-only\s? + replace: "" when: - ansible_facts['distribution'] == 'Rocky' - ansible_facts['distribution_major_version'] == '9' diff --git a/etc/kayobe/ansible/fix-hostname.yml b/etc/kayobe/ansible/fix-hostname.yml index ca5bd8883..539d47e62 100644 --- a/etc/kayobe/ansible/fix-hostname.yml +++ b/etc/kayobe/ansible/fix-hostname.yml @@ -8,7 +8,7 @@ # python interpreter. ansible_python_interpreter: /usr/bin/python3 # Work around no known_hosts entry on first boot. - ansible_ssh_common_args: "-o StrictHostKeyChecking=no" + ansible_ssh_common_args: -o StrictHostKeyChecking=no tasks: - name: Get current hostname ansible.builtin.command: diff --git a/etc/kayobe/ansible/fix-houston.yml b/etc/kayobe/ansible/fix-houston.yml index 6fa865792..8df8abc28 100644 --- a/etc/kayobe/ansible/fix-houston.yml +++ b/etc/kayobe/ansible/fix-houston.yml @@ -15,16 +15,15 @@ - name: Create Systemd Unit to workaround 'tc mirred to Houston' error hosts: network,compute - become: yes + become: true tasks: - name: Include kolla-ansible host vars - include_vars: "{{ kolla_config_path }}/inventory/overcloud/host_vars/{{ inventory_hostname }}" - + ansible.builtin.include_vars: "{{ kolla_config_path }}/inventory/overcloud/host_vars/{{ inventory_hostname }}" - name: Create systemd service for -ovs network interface - template: + ansible.builtin.template: src: fix-houston-interface.service.j2 - dest: "/etc/systemd/system/fix-houston-{{ item }}.service" + dest: /etc/systemd/system/fix-houston-{{ item }}.service loop: "{{ neutron_bridge_name.split(',') }}" vars: interface_name: "{{ item }}" @@ -32,13 +31,13 @@ notify: reload systemd - name: Enable and start systemd service for -ovs network interface - systemd: - name: "fix-houston-{{ item }}" - enabled: yes + ansible.builtin.systemd: # noqa command-instead-of-module + name: fix-houston-{{ item }} + enabled: true state: started when: neutron_bridge_name | length > 0 loop: "{{ neutron_bridge_name.split(',') }}" handlers: - - name: reload systemd - command: systemctl daemon-reload + - name: Reload systemd + ansible.builtin.command: systemctl daemon-reload # noqa command-instead-of-module diff --git a/etc/kayobe/ansible/fix-networking.yml b/etc/kayobe/ansible/fix-networking.yml index d3897e564..d4d291d0e 100644 --- a/etc/kayobe/ansible/fix-networking.yml +++ b/etc/kayobe/ansible/fix-networking.yml @@ -8,13 +8,13 @@ # python interpreter. ansible_python_interpreter: /usr/bin/python3 # Work around no known_hosts entry on first boot. 
- ansible_ssh_common_args: "-o StrictHostKeyChecking=no" + ansible_ssh_common_args: -o StrictHostKeyChecking=no tasks: - name: Ensure hosts are reachable ansible.builtin.wait_for_connection: - name: Ensure `hosts` file contains pulp and API entries - blockinfile: + ansible.builtin.blockinfile: path: /etc/hosts marker: "# {mark} Kayobe entries" block: | diff --git a/etc/kayobe/ansible/growroot.yml b/etc/kayobe/ansible/growroot.yml index 4748ab75b..f799c1b05 100644 --- a/etc/kayobe/ansible/growroot.yml +++ b/etc/kayobe/ansible/growroot.yml @@ -22,7 +22,7 @@ # python interpreter. ansible_python_interpreter: /usr/bin/python3 # Work around no known_hosts entry on first boot. - ansible_ssh_common_args: "-o StrictHostKeyChecking=no" + ansible_ssh_common_args: -o StrictHostKeyChecking=no # Don't assume facts are present. os_family: "{{ ansible_facts.os_family | default('Debian' if os_distribution == 'ubuntu' else 'RedHat') }}" # Ignore LVM check @@ -30,17 +30,23 @@ tasks: - name: Check LVM status - shell: - cmd: vgdisplay | grep -q lvm2 + ansible.builtin.shell: + cmd: set -o pipefail && vgdisplay | grep -q lvm2 changed_when: false failed_when: false check_mode: false register: lvm_check become: true - - block: + - name: Grow root PV + when: lvm_check.rc == 0 or growroot_ignore_lvm_check + # when: "'NOCHANGE' not in growpart.stdout" + # Commenting out the conditional because growpart is already triggered by cloud-init - hence it emits NOCHANGE + # Cloud-Inits growpart implementation has a bug https://bugzilla.redhat.com/show_bug.cgi?id=2122575 + # PVresize is not being triggered + block: - name: Check if growpart is installed - shell: + ansible.builtin.command: cmd: type growpart changed_when: false failed_when: false @@ -49,47 +55,42 @@ become: true - name: Ensure growpart is installed - package: + ansible.builtin.package: name: "{% if os_family == 'RedHat' %}cloud-utils-growpart{% else %}cloud-guest-utils{% endif %}" state: present cache_valid_time: "{{ apt_cache_valid_time if os_family == 'Debian' else omit }}" update_cache: "{{ true if os_family == 'Debian' else omit }}" become: true - when: growpart_check.rc !=0 + when: growpart_check.rc != 0 - name: Get root PV device - command: "pvs --select vg_name={{ growroot_vg }} --reportformat json" + ansible.builtin.command: pvs --select vg_name={{ growroot_vg }} --reportformat json register: pvs become: true changed_when: false check_mode: false - name: Fail if root PV device not found - fail: + ansible.builtin.fail: msg: > Expected LVM physical volume devices not found in volume group {{ growroot_vg }} when: (pvs.stdout | from_json).report[0].pv | length == 0 - name: Grow partition - command: "growpart {{ disk }} {{ part_num }}" + ansible.builtin.command: growpart {{ disk }} {{ part_num }} vars: pv: "{{ pvs.stdout | from_json }}" disk_tmp: "{{ pv.report[0].pv[0].pv_name[:-1] }}" disk: "{{ disk_tmp[:-1] if pv.report[0].pv[0].pv_name | regex_search('[a-z0-9]+[0-9]+p[0-9]+') else disk_tmp }}" part_num: "{{ pv.report[0].pv[0].pv_name[-1] }}" become: true - failed_when: "growpart.rc != 0 and 'NOCHANGE' not in growpart.stdout" + failed_when: growpart.rc != 0 and 'NOCHANGE' not in growpart.stdout changed_when: "'NOCHANGE' not in growpart.stdout" register: growpart - name: Grow LVM PV - command: "pvresize {{ disk }}" + ansible.builtin.command: pvresize {{ disk }} vars: pv: "{{ pvs.stdout | from_json }}" disk: "{{ pv.report[0].pv[0].pv_name }}" become: true - when: lvm_check.rc == 0 or growroot_ignore_lvm_check -# when: "'NOCHANGE' not in growpart.stdout" -# 
Commenting out the conditional because growpart is already triggered by cloud-init - hence it emits NOCHANGE -# Cloud-Inits growpart implementation has a bug https://bugzilla.redhat.com/show_bug.cgi?id=2122575 -# PVresize is not being triggered diff --git a/etc/kayobe/ansible/hotfix-containers.yml b/etc/kayobe/ansible/hotfix-containers.yml index 677105f3e..6e152f4d6 100644 --- a/etc/kayobe/ansible/hotfix-containers.yml +++ b/etc/kayobe/ansible/hotfix-containers.yml @@ -14,28 +14,27 @@ container_hotfix_command: "" container_hotfix_files: [] container_hotfix_container_regex: "" - container_hotfix_restart_containers: False - container_hotfix_become: False + container_hotfix_restart_containers: false + container_hotfix_become: false tasks: - name: Ensure inputs are valid - fail: - msg: "Invalid input. Container list cannot be empty. Either container_hotfix_command or container_hotfix_files must be populated." + ansible.builtin.fail: + msg: Invalid input. Container list cannot be empty. Either container_hotfix_command or container_hotfix_files must be populated. when: - - container_hotfix_container_regex == "" or - container_hotfix_command == "" and container_hotfix_files == [] + - container_hotfix_container_regex == "" or container_hotfix_command == "" and container_hotfix_files == [] - name: Get list of containers to hotfix - command: '{{ kolla_container_engine | default("docker")}} ps --format {% raw %}"{{.Names}}"{% endraw %}' + ansible.builtin.command: '{{ kolla_container_engine | default("docker") }} ps --format {% raw %}"{{ .Names }}"{% endraw %}' register: host_containers - name: Set fact for containers list - set_fact: + ansible.builtin.set_fact: containers_list: "{{ host_containers.stdout }}" - name: Fail if no containers match given regex vars: hotfix_containers: "{{ containers_list | split('\n') | regex_search(container_hotfix_container_regex) }}" - fail: + ansible.builtin.fail: msg: "No containers matched. Please check your regex. 
Containers running on host: {{ host_containers.stdout_lines }}" when: hotfix_containers == "" @@ -47,14 +46,14 @@ - name: Ensure container hotfix file(s) exist on host ansible.builtin.copy: src: "{{ item.src }}" - dest: "/tmp/hotfix-files/{{ index }}" + dest: /tmp/hotfix-files/{{ index }} loop: "{{ container_hotfix_files }}" loop_control: index_var: index when: container_hotfix_files != [] - name: Apply hotfix - include_tasks: run-container-hotfix.yml + ansible.builtin.include_tasks: run-container-hotfix.yml loop: "{{ containers_list | regex_findall(container_hotfix_container_regex, multiline=True) | list | unique }}" loop_control: loop_var: hotfix_container @@ -65,6 +64,6 @@ state: absent - name: Restart containers if requested - command: "{{ kolla_container_engine | default('docker')}} restart {{ item }}" + ansible.builtin.command: "{{ kolla_container_engine | default('docker') }} restart {{ item }}" loop: "{{ containers_list | regex_findall(container_hotfix_container_regex, multiline=True) | list | unique }}" when: container_hotfix_restart_containers diff --git a/etc/kayobe/ansible/install-pre-commit-hooks.yml b/etc/kayobe/ansible/install-pre-commit-hooks.yml index 694e07bec..6059d3bdd 100644 --- a/etc/kayobe/ansible/install-pre-commit-hooks.yml +++ b/etc/kayobe/ansible/install-pre-commit-hooks.yml @@ -10,7 +10,7 @@ - name: Install pre-commit hooks into kayobe virtual env ansible.builtin.pip: name: pre-commit - version: "{{ pre_commit_version }}" + version: "{{ pre_commit_version }}" virtualenv: "{{ lookup('ansible.builtin.env', 'VIRTUAL_ENV') | default(omit, true) }}" register: pip_install diff --git a/etc/kayobe/ansible/nova-compute-disable.yml b/etc/kayobe/ansible/nova-compute-disable.yml index 32ea8ee09..497fa7ea3 100644 --- a/etc/kayobe/ansible/nova-compute-disable.yml +++ b/etc/kayobe/ansible/nova-compute-disable.yml @@ -1,17 +1,17 @@ --- - name: Disable nova compute service hosts: compute - gather_facts: yes + gather_facts: true tags: - nova-compute-disable vars: venv: "{{ virtualenv_path }}/openstack" - disabled_reason: "Down for maintenance by nova-compute-disable.yml" + disabled_reason: Down for maintenance by nova-compute-disable.yml tasks: - name: Set up openstack cli virtualenv - pip: + ansible.builtin.pip: virtualenv: "{{ venv }}" - virtualenv_command: "/usr/bin/python3 -m venv" + virtualenv_command: /usr/bin/python3 -m venv name: - python-openstackclient state: latest @@ -22,7 +22,7 @@ ansible_host: "{{ hostvars[groups['controllers'][0]].ansible_host }}" - name: Query nova compute services - command: > + ansible.builtin.command: > {{ venv }}/bin/openstack compute service list --format json @@ -34,7 +34,7 @@ ansible_host: "{{ hostvars[groups['controllers'][0]].ansible_host }}" - name: Disable nova compute service - command: > + ansible.builtin.command: > {{ venv }}/bin/openstack compute service set {{ ansible_facts.nodename }} nova-compute diff --git a/etc/kayobe/ansible/nova-compute-drain.yml b/etc/kayobe/ansible/nova-compute-drain.yml index dddf84634..6aaa8cad5 100644 --- a/etc/kayobe/ansible/nova-compute-drain.yml +++ b/etc/kayobe/ansible/nova-compute-drain.yml @@ -1,7 +1,7 @@ --- - name: Drain a nova compute host of instances hosts: compute - gather_facts: yes + gather_facts: true tags: - nova-compute-drain vars: @@ -9,9 +9,9 @@ live_migration_fatal: true tasks: - name: Set up openstack cli virtualenv - pip: + ansible.builtin.pip: virtualenv: "{{ venv }}" - virtualenv_command: "/usr/bin/python3 -m venv" + virtualenv_command: /usr/bin/python3 -m venv name: - 
python-openstackclient state: latest @@ -21,9 +21,17 @@ vars: ansible_host: "{{ hostvars[groups['controllers'][0]].ansible_host }}" - - block: + - name: Migrate instances + delegate_to: "{{ groups['controllers'][0] }}" + environment: "{{ openstack_auth_env }}" + when: + - "'compute' in group_names" + - groups['compute'] | length > 1 + vars: + ansible_host: "{{ hostvars[groups['controllers'][0]].ansible_host }}" + block: - name: Query instances - command: > + ansible.builtin.command: > {{ venv }}/bin/openstack server list --host {{ ansible_facts.nodename }} --all-projects @@ -32,7 +40,7 @@ register: instances - name: Live migrate instances - command: > + ansible.builtin.command: > {{ venv }}/bin/openstack --os-compute-api-version 2.25 server migrate @@ -50,7 +58,7 @@ - result is failed - name: Query instances - command: > + ansible.builtin.command: > {{ venv }}/bin/openstack server list --host {{ ansible_facts.nodename }} --all-projects @@ -59,17 +67,9 @@ register: instances - name: Fail if there are instances still on the host - fail: + ansible.builtin.fail: msg: > Instances still on {{ inventory_hostname }}: {{ instances.stdout | from_json }} when: - live_migration_fatal | bool - instances.stdout | from_json | length > 0 - - delegate_to: "{{ groups['controllers'][0] }}" - environment: "{{ openstack_auth_env }}" - when: - - "'compute' in group_names" - - groups['compute'] | length > 1 - vars: - ansible_host: "{{ hostvars[groups['controllers'][0]].ansible_host }}" diff --git a/etc/kayobe/ansible/nova-compute-enable.yml b/etc/kayobe/ansible/nova-compute-enable.yml index f880a2aa4..9efe15106 100644 --- a/etc/kayobe/ansible/nova-compute-enable.yml +++ b/etc/kayobe/ansible/nova-compute-enable.yml @@ -1,17 +1,17 @@ --- - name: Enable nova compute service hosts: compute - gather_facts: yes + gather_facts: true tags: - nova-compute-enable vars: venv: "{{ virtualenv_path }}/openstack" - disabled_reason: "Down for maintenance by nova-compute-disable.yml" + disabled_reason: Down for maintenance by nova-compute-disable.yml tasks: - name: Set up openstack cli virtualenv - pip: + ansible.builtin.pip: virtualenv: "{{ venv }}" - virtualenv_command: "/usr/bin/python3 -m venv" + virtualenv_command: /usr/bin/python3 -m venv name: - python-openstackclient state: latest @@ -22,7 +22,7 @@ ansible_host: "{{ hostvars[groups['controllers'][0]].ansible_host }}" - name: Query nova compute services - command: > + ansible.builtin.command: > {{ venv }}/bin/openstack compute service list --format json @@ -34,7 +34,7 @@ ansible_host: "{{ hostvars[groups['controllers'][0]].ansible_host }}" - name: Enable nova compute service - command: > + ansible.builtin.command: > {{ venv }}/bin/openstack compute service set {{ ansible_facts.nodename }} nova-compute diff --git a/etc/kayobe/ansible/octavia-amphora-image-build.yml b/etc/kayobe/ansible/octavia-amphora-image-build.yml index 3d880d21f..4ed852abf 100644 --- a/etc/kayobe/ansible/octavia-amphora-image-build.yml +++ b/etc/kayobe/ansible/octavia-amphora-image-build.yml @@ -5,7 +5,7 @@ amphora_dib_upper_constraints_file: "{{ pip_upper_constraints_file }}" tasks: - name: Install EPEL - package: + ansible.builtin.package: name: epel-release become: true when: @@ -28,25 +28,25 @@ - qemu-utils - git - kpartx - package: + ansible.builtin.package: name: "{{ packages_for_os_family[ansible_facts.os_family] }}" - name: Create a temporary directory - tempfile: + ansible.builtin.tempfile: state: directory register: tempfile_result - - block: + - name: Build Amphora image + block: - name: 
Check whether the image cache directory exists - stat: + ansible.builtin.stat: path: "{{ image_cache_path }}" - get_md5: False - get_checksum: False - mime: False + get_checksum: false + mime: false register: image_cache_stat - name: Ensure the image cache directory exists - file: + ansible.builtin.file: path: "{{ image_cache_path }}" state: directory owner: "{{ ansible_facts.user_uid }}" @@ -59,27 +59,27 @@ - name: Set path facts vars: work_path: "{{ tempfile_result.path }}" - set_fact: + ansible.builtin.set_fact: src_path: "{{ work_path }}/octavia" venv_path: "{{ work_path }}/venv" work_path: "{{ work_path }}" - name: Clone Octavia source code - git: + ansible.builtin.git: depth: 1 dest: "{{ src_path }}" - repo: "https://opendev.org/openstack/octavia" + repo: https://opendev.org/openstack/octavia version: "{{ openstack_branch }}" - name: Install diskimage-builder in a virtual environment - pip: + ansible.builtin.pip: name: diskimage-builder extra_args: "{% if amphora_dib_upper_constraints_file %}-c {{ amphora_dib_upper_constraints_file }}{% endif %}" virtualenv: "{{ venv_path }}" - virtualenv_command: "python3.{{ ansible_facts.python.version.minor }} -m venv" + virtualenv_command: python3.{{ ansible_facts.python.version.minor }} -m venv - name: Create build log file (/var/log/octavia-amphora-image-build.log) - file: + ansible.builtin.file: path: /var/log/octavia-amphora-image-build.log state: touch owner: "{{ ansible_facts.user_uid }}" @@ -87,18 +87,20 @@ become: true - name: Create the Amphora image - shell: - cmd: "source {{ venv_path }}/bin/activate && ./diskimage-create.sh -i ubuntu-minimal -s 3 -g {{ openstack_branch }} >> /var/log/octavia-amphora-image-build.log 2>&1" + ansible.builtin.shell: + cmd: >- + source {{ venv_path }}/bin/activate && + ./diskimage-create.sh -i ubuntu-minimal -s 3 -g {{ openstack_branch }} >> /var/log/octavia-amphora-image-build.log 2>&1 chdir: "{{ src_path }}/diskimage-create" changed_when: true - name: Copy image to image store - copy: + ansible.builtin.copy: src: "{{ src_path }}/diskimage-create/amphora-x64-haproxy.qcow2" dest: "{{ image_cache_path }}/amphora-x64-haproxy-{{ openstack_release }}.qcow2" remote_src: true always: - name: Remove temporary files - file: + ansible.builtin.file: path: "{{ work_path }}" state: absent diff --git a/etc/kayobe/ansible/octavia-amphora-image-register.yml b/etc/kayobe/ansible/octavia-amphora-image-register.yml index d0da1b55c..cba191ef2 100644 --- a/etc/kayobe/ansible/octavia-amphora-image-register.yml +++ b/etc/kayobe/ansible/octavia-amphora-image-register.yml @@ -1,12 +1,12 @@ --- - name: Register an Octavia Amphora image in Glance - gather_facts: yes + gather_facts: true hosts: "{{ amphora_builder_group | default('seed') }}" vars: venv: "{{ virtualenv_path }}/octavia-amphora" tasks: - name: Fail if not using octavia user and service project - fail: + ansible.builtin.fail: msg: >- Source the octavia-openrc.sh file before executing this playbook when: >- @@ -14,21 +14,21 @@ lookup('env', 'OS_PROJECT_NAME') != 'service' - name: Get image checksum - stat: + ansible.builtin.stat: path: "{{ image_cache_path }}/amphora-x64-haproxy-{{ openstack_release }}.qcow2" checksum_algorithm: md5 changed_when: false register: image_checksum - name: Assert that Amphora image exists - assert: + ansible.builtin.assert: that: image_checksum.stat.exists fail_msg: | The amphora image: {{ image_cache_path }}/amphora-x64-haproxy-{{ openstack_release }}.qcow2 does not exist. Did you build the image? 
- name: Set up openstack virtualenv - pip: + ansible.builtin.pip: virtualenv: "{{ venv }}" virtualenv_command: python3 -m venv name: @@ -40,7 +40,7 @@ - name: Query Octavia Amphora image vars: ansible_python_interpreter: "{{ venv }}/bin/python" - os_image_info: + openstack.cloud.image_info: auth_type: password auth: "{{ openstack_auth }}" ca_cert: "{{ openstack_cacert }}" @@ -51,7 +51,7 @@ - name: Ensure Octavia Amphora image is renamed vars: ansible_python_interpreter: "{{ venv }}/bin/python" - shell: + ansible.builtin.command: cmd: >- {{ venv }}/bin/openstack image set amphora-x64-haproxy --name amphora-x64-haproxy-{{ ansible_facts.date_time.iso8601_basic_short }} when: @@ -63,7 +63,7 @@ - name: Ensure Octavia Amphora image is registered vars: ansible_python_interpreter: "{{ venv }}/bin/python" - os_image: + openstack.cloud.image: auth_type: password auth: "{{ openstack_auth }}" ca_cert: "{{ openstack_cacert }}" @@ -71,7 +71,7 @@ name: amphora-x64-haproxy container_format: bare disk_format: qcow2 - is_public: no + is_public: false filename: "{{ image_cache_path }}/amphora-x64-haproxy-{{ openstack_release }}.qcow2" properties: hw_architecture: x86_64 @@ -80,7 +80,7 @@ # FIXME: Use 'tags' parameter of os_image module available from # openstack.cloud.image 1.5.0. - name: Ensure Octavia Amphora image is tagged - shell: + ansible.builtin.command: cmd: >- {{ venv }}/bin/openstack image set amphora-x64-haproxy --tag amphora environment: "{{ openstack_auth_env }}" diff --git a/etc/kayobe/ansible/openstack-host-image-upload.yml b/etc/kayobe/ansible/openstack-host-image-upload.yml index 2c92d2446..daff0549d 100644 --- a/etc/kayobe/ansible/openstack-host-image-upload.yml +++ b/etc/kayobe/ansible/openstack-host-image-upload.yml @@ -5,15 +5,16 @@ - name: Upload an OS image to Glance hosts: seed vars: - local_image_path: "/opt/kayobe/images/overcloud-{{ os_distribution }}-{{ os_release }}/overcloud-{{ os_distribution }}-{{ os_release }}.qcow2" - image_name: "overcloud-{{ os_distribution }}-{{ os_release }}" + local_image_path: /opt/kayobe/images/overcloud-{{ os_distribution }}-{{ os_release }}/overcloud-{{ os_distribution }}-{{ os_release }}.qcow2 + image_name: overcloud-{{ os_distribution }}-{{ os_release }} tasks: - - block: + - name: Upload image to Glance + block: - name: Write out clouds.yaml - copy: + ansible.builtin.copy: content: "{{ lookup('ansible.builtin.env', 'CLOUDS_YAML') }}" dest: clouds.yaml - mode: 0600 + mode: "0600" - name: Write out secure.yaml no_log: true @@ -24,13 +25,13 @@ auth: application_credential_id: "{{ lookup('ansible.builtin.env', 'OS_APPLICATION_CREDENTIAL_ID') }}" application_credential_secret: "{{ lookup('ansible.builtin.env', 'OS_APPLICATION_CREDENTIAL_SECRET') }}" - copy: + ansible.builtin.copy: content: "{{ os_secrets | to_nice_yaml }}" dest: secure.yaml - mode: 0600 + mode: "0600" - name: Ensure dependencies are installed - pip: + ansible.builtin.pip: name: openstacksdk - name: Upload an image to Glance @@ -44,11 +45,11 @@ always: - name: Remove clouds.yaml - file: + ansible.builtin.file: path: clouds.yaml state: absent - name: Remove secure.yaml - file: + ansible.builtin.file: path: secure.yaml state: absent diff --git a/etc/kayobe/ansible/ovn-fix-chassis-priorities.yml b/etc/kayobe/ansible/ovn-fix-chassis-priorities.yml index 9ba469ce7..f5c7197fc 100644 --- a/etc/kayobe/ansible/ovn-fix-chassis-priorities.yml +++ b/etc/kayobe/ansible/ovn-fix-chassis-priorities.yml @@ -33,7 +33,7 @@ - name: Group hosts by leader/follower role ansible.builtin.group_by: - key: 
"ovn_nb_{{ 'leader' if ovn_check_result.rc == 0 else 'follower' }}" + key: ovn_nb_{{ 'leader' if ovn_check_result.rc == 0 else 'follower' }} changed_when: false - name: Assert one leader exists diff --git a/etc/kayobe/ansible/prometheus-network-names.yml b/etc/kayobe/ansible/prometheus-network-names.yml index 5a1f83711..73d3c449f 100644 --- a/etc/kayobe/ansible/prometheus-network-names.yml +++ b/etc/kayobe/ansible/prometheus-network-names.yml @@ -1,10 +1,10 @@ --- - name: Prometheus friendly network names hosts: overcloud - gather_facts: no + gather_facts: false tasks: - name: Gather network maps from each host with unique identifiers - set_fact: + ansible.builtin.set_fact: host_network_maps: >- {%- set if_list = [] -%} {%- for i in network_interfaces -%} @@ -20,13 +20,13 @@ {{ if_list }} - name: Aggregate network maps from all hosts - set_fact: + ansible.builtin.set_fact: prometheus_network_maps_aggregated: "{{ groups['overcloud'] | map('extract', hostvars, 'host_network_maps') | flatten }}" run_once: true delegate_to: localhost - name: Deduplicate the aggregated list based on unique IDs - set_fact: + ansible.builtin.set_fact: prometheus_network_maps_blob: "{{ prometheus_network_maps_aggregated | unique(attribute='unique_id') }}" run_once: true delegate_to: localhost diff --git a/etc/kayobe/ansible/pulp-auth-proxy.yml b/etc/kayobe/ansible/pulp-auth-proxy.yml index c5c76efc8..728963858 100644 --- a/etc/kayobe/ansible/pulp-auth-proxy.yml +++ b/etc/kayobe/ansible/pulp-auth-proxy.yml @@ -5,7 +5,8 @@ hosts: container-image-builders gather_facts: false tasks: - - import_role: + - name: Deploy Pulp auth proxy + ansible.builtin.import_role: name: pulp_auth_proxy vars: pulp_auth_proxy_url: "{{ stackhpc_release_pulp_url }}" diff --git a/etc/kayobe/ansible/pulp-container-publish.yml b/etc/kayobe/ansible/pulp-container-publish.yml index 1cc652048..e98acf601 100644 --- a/etc/kayobe/ansible/pulp-container-publish.yml +++ b/etc/kayobe/ansible/pulp-container-publish.yml @@ -1,9 +1,10 @@ --- - name: Publish Pulp container images hosts: localhost - gather_facts: True + gather_facts: true tasks: - - import_role: + - name: Ensure repository publications exist + ansible.builtin.import_role: name: stackhpc.pulp.pulp_distribution # NOTE: use intermediate variable to avoid distributing RPMs. vars: diff --git a/etc/kayobe/ansible/pulp-container-sync.yml b/etc/kayobe/ansible/pulp-container-sync.yml index 59b8ec024..285387778 100644 --- a/etc/kayobe/ansible/pulp-container-sync.yml +++ b/etc/kayobe/ansible/pulp-container-sync.yml @@ -1,9 +1,10 @@ --- - name: Sync client Pulp container repositories hosts: localhost - gather_facts: False + gather_facts: false tasks: - - import_role: + - name: Sync Pulp container repositories + ansible.builtin.import_role: name: stackhpc.pulp.pulp_repository # NOTE: use intermediate variable to avoid syncing RPMs. 
vars: diff --git a/etc/kayobe/ansible/pulp-host-image-download.yml b/etc/kayobe/ansible/pulp-host-image-download.yml index 6b3494dfe..1c8672c71 100644 --- a/etc/kayobe/ansible/pulp-host-image-download.yml +++ b/etc/kayobe/ansible/pulp-host-image-download.yml @@ -11,10 +11,9 @@ {{ stackhpc_overcloud_host_image_version }}/\ overcloud-{{ os_distribution }}-{{ os_release }}\ {{ '-ofed' if stackhpc_overcloud_host_image_is_ofed else '' }}.qcow2" - tasks: - name: Print image information - debug: + ansible.builtin.debug: msg: | OS Distribution: {{ os_distribution }} OS Release: {{ os_release }} @@ -23,14 +22,14 @@ # TODO: Add checksum support - name: Download image artifact - get_url: + ansible.builtin.get_url: url: "{{ stackhpc_overcloud_host_image_url_no_auth }}" username: "{{ stackhpc_image_repository_username }}" password: "{{ stackhpc_image_repository_password }}" force_basic_auth: true unredirected_headers: - - "Authorization" - dest: "/tmp/{{ os_distribution }}-{{ os_release }}.qcow2" + - Authorization + dest: /tmp/{{ os_distribution }}-{{ os_release }}.qcow2 mode: "0644" register: image_download_result until: image_download_result.status_code == 200 diff --git a/etc/kayobe/ansible/pulp-host-image-promote.yml b/etc/kayobe/ansible/pulp-host-image-promote.yml index 42f98b423..875ab85ab 100644 --- a/etc/kayobe/ansible/pulp-host-image-promote.yml +++ b/etc/kayobe/ansible/pulp-host-image-promote.yml @@ -6,11 +6,10 @@ remote_pulp_url: "{{ stackhpc_release_pulp_url }}" remote_pulp_username: "{{ stackhpc_image_repository_username }}" remote_pulp_password: "{{ stackhpc_image_repository_password }}" - repository_name: "kayobe-images-{{ openstack_release }}-{{ os_distribution }}-{{ os_release }}" - base_path: "kayobe-images/{{ openstack_release }}/{{ os_distribution }}/{{ os_release }}" + repository_name: kayobe-images-{{ openstack_release }}-{{ os_distribution }}-{{ os_release }} + base_path: kayobe-images/{{ openstack_release }}/{{ os_distribution }}/{{ os_release }} promotion_tag: "{{ lookup('env', 'OVERCLOUD_HOST_IMAGE_TAG') }}" tasks: - - name: Check whether the image exists pulp.squeezer.file_distribution: pulp_url: "{{ remote_pulp_url }}" @@ -24,8 +23,8 @@ delay: 5 - name: Fail if the image does not exist - fail: - msg: "Image {{ promotion_tag }} does not exist" + ansible.builtin.fail: + msg: Image {{ promotion_tag }} does not exist when: distribution_details.distribution is none - name: Ensure production content guard is set @@ -43,5 +42,5 @@ delay: 5 - name: Print version tag and os - debug: + ansible.builtin.debug: msg: "Promoted tag: {{ promotion_tag }}" diff --git a/etc/kayobe/ansible/pulp-host-image-upload.yml b/etc/kayobe/ansible/pulp-host-image-upload.yml index cc4876080..024ee9591 100644 --- a/etc/kayobe/ansible/pulp-host-image-upload.yml +++ b/etc/kayobe/ansible/pulp-host-image-upload.yml @@ -5,17 +5,17 @@ remote_pulp_url: "{{ stackhpc_release_pulp_url }}" remote_pulp_username: "{{ stackhpc_image_repository_username }}" remote_pulp_password: "{{ stackhpc_image_repository_password }}" - repository_name: "kayobe-images-{{ openstack_release }}-{{ os_distribution }}-{{ os_release }}" - pulp_base_path: "kayobe-images/{{ openstack_release }}/{{ os_distribution }}/{{ os_release }}" + repository_name: kayobe-images-{{ openstack_release }}-{{ os_distribution }}-{{ os_release }} + pulp_base_path: kayobe-images/{{ openstack_release }}/{{ os_distribution }}/{{ os_release }} tasks: - name: Print image tag - debug: + ansible.builtin.debug: msg: "Image tag: {{ host_image_tag }}" - name: Get filename 
- find: + ansible.builtin.find: paths: "{{ image_path }}" - patterns: '*.qcow2' + patterns: "*.qcow2" register: found_files - name: Upload an artifact @@ -101,7 +101,9 @@ until: latest_distribution_details is success retries: 3 delay: 5 + notify: Latest distribution details changed + handlers: - name: Create distribution for given version pulp.squeezer.file_distribution: pulp_url: "{{ remote_pulp_url }}" @@ -112,39 +114,36 @@ publication: "{{ publication_details.publication.pulp_href }}" content_guard: development state: present - when: latest_distribution_details.changed + listen: Latest distribution details changed register: distribution_result until: distribution_result is success retries: 3 delay: 5 - name: Update new images file with versioned path - lineinfile: + ansible.builtin.lineinfile: path: /tmp/updated_images.txt - line: "{{ remote_pulp_url }}/pulp/content/{{ pulp_base_path }}/\ - {{ host_image_tag }}/{{ found_files.files[0].path | basename }}" + line: "{{ remote_pulp_url }}/pulp/content/{{ pulp_base_path }}/{{ host_image_tag }}/{{ found_files.files[0].path | basename }}" create: true + listen: Latest distribution details changed - name: Update new images file with latest path - lineinfile: + ansible.builtin.lineinfile: path: /tmp/updated_images.txt - line: "{{ remote_pulp_url }}/pulp/content/{{ pulp_base_path }}/\ - latest/{{ found_files.files[0].path | basename }}" - when: latest_distribution_details.changed + line: "{{ remote_pulp_url }}/pulp/content/{{ pulp_base_path }}/latest/{{ found_files.files[0].path | basename }}" + listen: Latest distribution details changed - name: Print versioned path - debug: - msg: "New versioned path: {{ remote_pulp_url }}/pulp/content/{{ pulp_base_path }}/\ - {{ host_image_tag }}/{{ found_files.files[0].path | basename }}" - when: latest_distribution_details.changed + ansible.builtin.debug: + msg: "New versioned path: {{ remote_pulp_url }}/pulp/content/{{ pulp_base_path }}/{{ host_image_tag }}/{{ found_files.files[0].path | basename }}" + listen: Latest distribution details changed - name: Print latest path - debug: - msg: "New latest path: {{ remote_pulp_url }}/pulp/content/{{ pulp_base_path }}/\ - latest/{{ found_files.files[0].path | basename }}" - when: latest_distribution_details.changed + ansible.builtin.debug: + msg: "New latest path: {{ remote_pulp_url }}/pulp/content/{{ pulp_base_path }}/latest/{{ found_files.files[0].path | basename }}" + listen: Latest distribution details changed - name: Print version tag - debug: + ansible.builtin.debug: msg: "New tag: {{ host_image_tag }}" - when: latest_distribution_details.changed + listen: Latest distribution details changed diff --git a/etc/kayobe/ansible/pulp-repo-promote-production.yml b/etc/kayobe/ansible/pulp-repo-promote-production.yml index 65c6ceed8..a3e50c726 100644 --- a/etc/kayobe/ansible/pulp-repo-promote-production.yml +++ b/etc/kayobe/ansible/pulp-repo-promote-production.yml @@ -1,9 +1,10 @@ --- - name: Promote development Pulp repositories to production hosts: localhost - gather_facts: True + gather_facts: true tasks: - - import_role: + - name: Ensure repository publications exist + ansible.builtin.import_role: name: stackhpc.pulp.pulp_distribution vars: pulp_distribution_deb: "{{ stackhpc_pulp_distribution_deb_production | selectattr('required') }}" diff --git a/etc/kayobe/ansible/pulp-repo-publish.yml b/etc/kayobe/ansible/pulp-repo-publish.yml index c93a5b685..1b8ca1f2d 100644 --- a/etc/kayobe/ansible/pulp-repo-publish.yml +++ b/etc/kayobe/ansible/pulp-repo-publish.yml @@ 
-1,16 +1,18 @@ --- - name: Publish Pulp repositories hosts: localhost - gather_facts: False + gather_facts: false tasks: - - import_role: + - name: Ensure repository publications exist + ansible.builtin.import_role: name: stackhpc.pulp.pulp_publication # NOTE: use intermediate variable to avoid publishing containers. vars: pulp_publication_deb: "{{ stackhpc_pulp_publication_deb_development | selectattr('required') }}" pulp_publication_rpm: "{{ stackhpc_pulp_publication_rpm_development | selectattr('required') }}" - - import_role: + - name: Ensure repository distributions exist + ansible.builtin.import_role: name: stackhpc.pulp.pulp_distribution vars: pulp_distribution_deb: "{{ stackhpc_pulp_distribution_deb_development | selectattr('required') }}" diff --git a/etc/kayobe/ansible/pulp-repo-sync.yml b/etc/kayobe/ansible/pulp-repo-sync.yml index 3d2e490d3..e47a2f572 100644 --- a/etc/kayobe/ansible/pulp-repo-sync.yml +++ b/etc/kayobe/ansible/pulp-repo-sync.yml @@ -1,9 +1,10 @@ --- - name: Sync Pulp repositories hosts: localhost - gather_facts: False + gather_facts: false tasks: - - import_role: + - name: Sync Pulp repositories + ansible.builtin.import_role: name: stackhpc.pulp.pulp_repository # NOTE: use intermediate variable to avoid syncing containers. vars: diff --git a/etc/kayobe/ansible/purge-command-not-found.yml b/etc/kayobe/ansible/purge-command-not-found.yml index d1ed8219f..855bfbc10 100644 --- a/etc/kayobe/ansible/purge-command-not-found.yml +++ b/etc/kayobe/ansible/purge-command-not-found.yml @@ -18,10 +18,10 @@ # python interpreter. ansible_python_interpreter: /usr/bin/python3 # Work around no known_hosts entry on first boot. - ansible_ssh_common_args: "-o StrictHostKeyChecking=no" + ansible_ssh_common_args: -o StrictHostKeyChecking=no tasks: - name: Purge command-not-found package - package: + ansible.builtin.package: name: - command-not-found - python3-command-not-found diff --git a/etc/kayobe/ansible/push-ofed.yml b/etc/kayobe/ansible/push-ofed.yml index c0214a0b0..3b1130c20 100644 --- a/etc/kayobe/ansible/push-ofed.yml +++ b/etc/kayobe/ansible/push-ofed.yml @@ -17,7 +17,7 @@ - name: Lookup Pulp RPMs on builder ansible.builtin.find: - paths: "/home/cloud-user/ofed" + paths: /home/cloud-user/ofed register: rpm_dir - name: Upload OFED RPMs to Pulp diff --git a/etc/kayobe/ansible/rabbitmq-reset.yml b/etc/kayobe/ansible/rabbitmq-reset.yml index b068413a5..405b14830 100644 --- a/etc/kayobe/ansible/rabbitmq-reset.yml +++ b/etc/kayobe/ansible/rabbitmq-reset.yml @@ -4,20 +4,20 @@ - name: Reset RabbitMQ hosts: controllers - become: True - gather_facts: no + become: true + gather_facts: false tags: - rabbitmq-reset vars: - - container_name: rabbitmq + container_name: rabbitmq tasks: - name: Checking timedatectl status - command: timedatectl status + ansible.builtin.command: timedatectl status register: timedatectl_status changed_when: false - name: Fail if the clock is not synchronized - fail: + ansible.builtin.fail: msg: >- timedatectl sees the system clock as unsynchronized. You may need to force synchronisation using `chronyc makestep`. 
@@ -26,46 +26,45 @@ - "'synchronized: yes' not in timedatectl_status.stdout" - name: Inspect the {{ container_name }} container - shell: - cmd: "docker container inspect --format '{{ '{{' }} .State.Running {{ '}}' }}' {{ container_name }}" + ansible.builtin.shell: + cmd: docker container inspect --format '{{ '{{' }} .State.Running {{ '}}' }}' {{ container_name }} register: inspection - name: Ensure the {{ container_name }} container is running - command: "systemctl start kolla-{{ container_name }}-container.service" + ansible.builtin.command: systemctl start kolla-{{ container_name }}-container.service when: inspection.stdout == 'false' - name: Wait for the {{ container_name }} container to reach state 'Running' - shell: - cmd: "docker container inspect --format '{{ '{{' }} .State.Running {{ '}}' }}' {{ container_name }}" + ansible.builtin.shell: + cmd: docker container inspect --format '{{ '{{' }} .State.Running {{ '}}' }}' {{ container_name }} register: result until: result.stdout == 'true' retries: 10 delay: 6 - name: Wait for the rabbitmq node to automatically start on container start - command: "docker exec {{ container_name }} /bin/bash -c 'rabbitmqctl wait /var/lib/rabbitmq/mnesia/rabbitmq.pid --timeout 60'" + ansible.builtin.command: docker exec {{ container_name }} /bin/bash -c 'rabbitmqctl wait /var/lib/rabbitmq/mnesia/rabbitmq.pid --timeout 60' when: inspection.stdout == 'false' - name: Stop app - command: "docker exec {{ container_name }} /bin/bash -c 'rabbitmqctl stop_app'" - + ansible.builtin.command: docker exec {{ container_name }} /bin/bash -c 'rabbitmqctl stop_app' - name: Force reset app - command: "docker exec {{ container_name }} /bin/bash -c 'rabbitmqctl force_reset'" - + ansible.builtin.command: docker exec {{ container_name }} /bin/bash -c 'rabbitmqctl force_reset' - name: Start app - command: "docker exec {{ container_name }} /bin/bash -c 'rabbitmqctl start_app'" - + ansible.builtin.command: docker exec {{ container_name }} /bin/bash -c 'rabbitmqctl start_app' - name: Wait for all nodes to join the cluster - command: "docker exec {{ container_name }} /bin/bash -c 'rabbitmqctl await_online_nodes {{ groups['controllers'] | length }}'" - + ansible.builtin.command: docker exec {{ container_name }} /bin/bash -c 'rabbitmqctl await_online_nodes {{ groups['controllers'] | length }}' - name: Restart OpenStack services hosts: controllers:compute become: true - gather_facts: no + gather_facts: false tags: - restart-openstack tasks: # The following services use RabbitMQ. 
- name: Restart OpenStack services - shell: >- - systemctl -a | egrep 'kolla-(barbican|blazar|cinder|cloudkitty|designate|heat|ironic|keystone|magnum|manila|neutron|nova|octavia)' | awk '{ print $1 }' | xargs systemctl restart + ansible.builtin.shell: >- + set -o pipefail && + systemctl -a | egrep 'kolla-(barbican|blazar|cinder|cloudkitty|designate|heat|ironic|keystone|magnum|manila|neutron|nova|octavia)' | + awk '{ print $1 }' | + xargs systemctl restart diff --git a/etc/kayobe/ansible/reboot.yml b/etc/kayobe/ansible/reboot.yml index 545c509df..1af22e7f7 100644 --- a/etc/kayobe/ansible/reboot.yml +++ b/etc/kayobe/ansible/reboot.yml @@ -8,20 +8,20 @@ reboot_with_bootstrap_user: false ansible_user: "{{ bootstrap_user if reboot_with_bootstrap_user | bool else kayobe_ansible_user }}" ansible_ssh_common_args: "{{ '-o StrictHostKeyChecking=no' if reboot_with_bootstrap_user | bool else '' }}" - ansible_python_interpreter: "/usr/bin/python3" + ansible_python_interpreter: /usr/bin/python3 tags: - reboot tasks: - name: Reboot and wait become: true - reboot: + ansible.builtin.reboot: reboot_timeout: "{{ reboot_timeout_s }}" search_paths: # Systems running molly-guard hang waiting for confirmation before rebooting without this. - - "/lib/molly-guard" + - /lib/molly-guard # Default list: - - "/sbin" - - "/bin" - - "/usr/sbin" - - "/usr/bin" - - "/usr/local/sbin" + - /sbin + - /bin + - /usr/sbin + - /usr/bin + - /usr/local/sbin diff --git a/etc/kayobe/ansible/rekey-hosts.yml b/etc/kayobe/ansible/rekey-hosts.yml index a72da3ac7..eee15ead4 100644 --- a/etc/kayobe/ansible/rekey-hosts.yml +++ b/etc/kayobe/ansible/rekey-hosts.yml @@ -30,7 +30,7 @@ - name: Fail when existing private key does not exist ansible.builtin.fail: - msg: "No existing private key file found. Check existing_private_key_path is set correctly." + msg: No existing private key file found. Check existing_private_key_path is set correctly. when: - not stat_result.stat.exists delegate_to: localhost @@ -45,7 +45,7 @@ - name: Fail when existing public key does not exist ansible.builtin.fail: - msg: "No existing public key file found. Check existing_public_key_path is set correctly." + msg: No existing public key file found. Check existing_public_key_path is set correctly. 
when: - not stat_result.stat.exists delegate_to: localhost @@ -69,26 +69,29 @@ become: true - name: Locally deprecate existing key (private) - command: "mv {{ existing_private_key_path }} {{ deprecated_key_path }}" + ansible.builtin.command: mv {{ existing_private_key_path }} {{ deprecated_key_path }} delegate_to: localhost run_once: true - name: Locally deprecate existing key (public) - command: "mv {{ existing_public_key_path }} {{ deprecated_key_path }}.pub" + ansible.builtin.command: mv {{ existing_public_key_path }} {{ deprecated_key_path }}.pub delegate_to: localhost run_once: true - name: Locally promote new key (private) - command: "mv {{ existing_private_key_path }}_new {{ new_private_key_path }}" + ansible.builtin.command: mv {{ existing_private_key_path }}_new {{ new_private_key_path }} delegate_to: localhost run_once: true - name: Locally promote new key (public) - command: "mv {{ existing_private_key_path }}_new.pub {{ new_public_key_path }}" + ansible.builtin.command: mv {{ existing_private_key_path }}_new.pub {{ new_public_key_path }} delegate_to: localhost run_once: true - - block: + - name: Remove old key + tags: remove-key + when: rekey_remove_existing_key | bool + block: - name: Stat old key file ansible.builtin.stat: path: "{{ deprecated_key_path }}.pub" @@ -98,7 +101,7 @@ - name: Fail when deprecated public key does not exist ansible.builtin.fail: - msg: "No deprecated public key file found. Check deprecated_key_path is set correctly." + msg: No deprecated public key file found. Check deprecated_key_path is set correctly. when: - not stat_result.stat.exists delegate_to: localhost @@ -113,5 +116,3 @@ key: "{{ lookup('file', lookup_path) }}" loop: "{{ rekey_users }}" become: true - tags: remove-key - when: rekey_remove_existing_key | bool diff --git a/etc/kayobe/ansible/reset-bls-entries.yml b/etc/kayobe/ansible/reset-bls-entries.yml index 68989d1bb..e26593b70 100644 --- a/etc/kayobe/ansible/reset-bls-entries.yml +++ b/etc/kayobe/ansible/reset-bls-entries.yml @@ -11,7 +11,7 @@ - reset-bls-entries tasks: - name: Get machine ID - command: cat /etc/machine-id + ansible.builtin.command: cat /etc/machine-id register: machine_id check_mode: false @@ -25,15 +25,15 @@ # We set force to false to avoid replacing an existing BLS entry with the # correct machine ID. 
- name: Rename entries with wrong machine ID - copy: - src: "/boot/loader/entries/{{ item }}" - dest: "/boot/loader/entries/{{ item | ansible.builtin.regex_replace('^[a-f0-9]*', machine_id.stdout) }}" + ansible.builtin.copy: + src: /boot/loader/entries/{{ item }} + dest: /boot/loader/entries/{{ item | ansible.builtin.regex_replace('^[a-f0-9]*', machine_id.stdout) }} force: false remote_src: true with_items: "{{ bls_entries.files | map(attribute='path') | reject('search', machine_id.stdout) | map('basename') }}" - name: Remove entries with wrong machine ID - file: - path: "/boot/loader/entries/{{ item }}" + ansible.builtin.file: + path: /boot/loader/entries/{{ item }} state: absent with_items: "{{ bls_entries.files | map(attribute='path') | reject('search', machine_id.stdout) | map('basename') }}" diff --git a/etc/kayobe/ansible/roles/pulp_auth_proxy/tasks/main.yml b/etc/kayobe/ansible/roles/pulp_auth_proxy/tasks/main.yml index 7f412c5f2..6cbe55e5e 100644 --- a/etc/kayobe/ansible/roles/pulp_auth_proxy/tasks/main.yml +++ b/etc/kayobe/ansible/roles/pulp_auth_proxy/tasks/main.yml @@ -1,5 +1,6 @@ --- -- when: pulp_auth_proxy_network_mode is none +- name: Set facts + when: pulp_auth_proxy_network_mode is none block: - name: Check if Docker bridge network exists community.docker.docker_host_info: @@ -11,7 +12,7 @@ pulp_auth_proxy_network_mode: "{{ 'host' if docker_host_info.networks | selectattr('Driver', 'equalto', 'bridge') | list | length == 0 else 'bridge' }}" - name: Assert that localhost is resolvable when using host networking - assert: + ansible.builtin.assert: that: - "'localhost' is ansible.utils.resolvable" fail_msg: >- @@ -19,18 +20,18 @@ Consider adding '127.0.0.1 localhost' to /etc/hosts. when: pulp_auth_proxy_network_mode == 'host' -- name: "Ensure {{ pulp_auth_proxy_conf_path }} exists" +- name: Ensure {{ pulp_auth_proxy_conf_path }} exists ansible.builtin.file: path: "{{ pulp_auth_proxy_conf_path }}" state: directory - mode: 0700 + mode: "0700" become: true - name: Ensure pulp_proxy.conf is templated ansible.builtin.template: src: pulp_proxy.conf.j2 dest: "{{ pulp_auth_proxy_conf_path }}/pulp_proxy.conf" - mode: 0600 + mode: "0600" become: true register: pulp_proxy_conf diff --git a/etc/kayobe/ansible/rsyslog.yml b/etc/kayobe/ansible/rsyslog.yml index 4b9791dc3..79c04b82f 100644 --- a/etc/kayobe/ansible/rsyslog.yml +++ b/etc/kayobe/ansible/rsyslog.yml @@ -8,29 +8,29 @@ - name: Configure rsyslog to forward messages hosts: "{{ rsyslog_group | default('overcloud') }}" - become: yes + become: true tasks: - - name: Ensure rsyslog is installed - package: - name: rsyslog - state: present + - name: Ensure rsyslog is installed + ansible.builtin.package: + name: rsyslog + state: present - - name: Ensure rsyslog is started and enabled - systemd: - state: started - enabled: yes - name: rsyslog + - name: Ensure rsyslog is started and enabled + ansible.builtin.systemd: + state: started + enabled: true + name: rsyslog - - name: Update rsyslog configuration - lineinfile: - path: /etc/rsyslog.conf - insertafter: "^#*.* @@remote-host:514" - line: "*.* @{{ internal_net_name | net_ip }}:5140" - register: rsyslog_config + - name: Update rsyslog configuration + ansible.builtin.lineinfile: + path: /etc/rsyslog.conf + insertafter: ^#*.* @@remote-host:514 + line: "*.* @{{ internal_net_name | net_ip }}:5140" + notify: Restart rsyslog - - name: Restart rsyslog - systemd: - state: restarted - name: rsyslog - when: rsyslog_config.changed + handlers: + - name: Restart rsyslog + ansible.builtin.systemd: + 
state: restarted + name: rsyslog diff --git a/etc/kayobe/ansible/run-container-hotfix.yml b/etc/kayobe/ansible/run-container-hotfix.yml index de652e451..8636a85b6 100644 --- a/etc/kayobe/ansible/run-container-hotfix.yml +++ b/etc/kayobe/ansible/run-container-hotfix.yml @@ -1,23 +1,26 @@ --- -- block: +- name: Run container_hotfix + when: container_hotfix_files != [] + block: - name: Ensure any required directories exist in container(s) - command: "{{ kolla_container_engine | default('docker') }} exec {{ '-u 0' if container_hotfix_become else '' }} {{ hotfix_container }} mkdir -p {{ item.dest | dirname }}" + ansible.builtin.command: "{{ kolla_container_engine | default('docker') }} exec {{ '-u 0' if container_hotfix_become else '' }} {{ hotfix_container }} mkdir + -p {{ item.dest | dirname }}" loop: "{{ container_hotfix_files }}" - name: Copy file into container(s) - command: "{{ kolla_container_engine | default('docker') }} cp /tmp/hotfix-files/{{ index }} {{ hotfix_container }}:{{ item.dest }}" + ansible.builtin.command: "{{ kolla_container_engine | default('docker') }} cp /tmp/hotfix-files/{{ index }} {{ hotfix_container }}:{{ item.dest }}" loop: "{{ container_hotfix_files }}" loop_control: index_var: index - name: Set mode for copied files - command: "{{ kolla_container_engine | default('docker') }} exec {{ '-u 0' if container_hotfix_become else '' }} {{ hotfix_container }} chmod {{ item.mode | default('400') }} {{ item.dest }}" + ansible.builtin.command: "{{ kolla_container_engine | default('docker') }} exec {{ '-u 0' if container_hotfix_become else '' }} {{ hotfix_container }} chmod + {{ item.mode | default('400') }} {{ item.dest }}" loop: "{{ container_hotfix_files }}" loop_control: index_var: index - when: container_hotfix_files != [] - - name: Run container_hotfix_command - command: "{{ kolla_container_engine | default('docker')}} exec {{ '-u 0' if container_hotfix_become else '' }} {{ hotfix_container }} {{ container_hotfix_command }}" + ansible.builtin.command: "{{ kolla_container_engine | default('docker') }} exec \ + {{ '-u 0' if container_hotfix_become else '' }} {{ hotfix_container }} {{ container_hotfix_command }}" when: container_hotfix_command diff --git a/etc/kayobe/ansible/smartmon-tools.yml b/etc/kayobe/ansible/smartmon-tools.yml index b4a064b63..a8f096439 100644 --- a/etc/kayobe/ansible/smartmon-tools.yml +++ b/etc/kayobe/ansible/smartmon-tools.yml @@ -1,9 +1,10 @@ --- -- hosts: overcloud +- name: Install smartmon-tools + hosts: overcloud tasks: - name: Ensure smartmontools, jq, nvme-cli and cron/cronie are installed - package: + ansible.builtin.package: name: - smartmontools - nvme-cli @@ -13,39 +14,42 @@ become: true - name: Ensure the cron/crond service is running - service: + ansible.builtin.service: name: "{{ 'cron' if ansible_facts['distribution'] == 'Ubuntu' else 'crond' }}" state: started enabled: true become: true - name: Copy smartmon.sh and nvmemon.sh from scripts folder - copy: - src: "scripts/{{ item }}" + ansible.builtin.copy: + src: scripts/{{ item }} dest: /usr/local/bin/ - owner: 'root' - group: 'root' - mode: '0700' + owner: root + group: root + mode: "0700" loop: - smartmon.sh - nvmemon.sh - become: yes + become: true - name: Set PATH Variable for cron - cron: + ansible.builtin.cron: name: PATH user: root - env: yes + env: true job: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - become: yes + become: true - name: Schedule cronjob to run both scripts every 5 minutes and save output to file - cron: - name: "SMART metrics for drive 
monitoring using {{ item }}" + ansible.builtin.cron: + name: SMART metrics for drive monitoring using {{ item }} user: root minute: "*/5" - job: "/usr/local/bin/{{ item }}.sh > /var/lib/docker/volumes/textfile/_data/{{ item }}.prom.temp && mv -f /var/lib/docker/volumes/textfile/_data/{{ item }}.prom.temp /var/lib/docker/volumes/textfile/_data/{{ item }}.prom" + job: >- + /usr/local/bin/{{ item }}.sh > + /var/lib/docker/volumes/textfile/_data/{{ item }}.prom.temp && + mv -f /var/lib/docker/volumes/textfile/_data/{{ item }}.prom.temp /var/lib/docker/volumes/textfile/_data/{{ item }}.prom loop: - smartmon - nvmemon - become: yes + become: true diff --git a/etc/kayobe/ansible/stackhpc-openstack-tests.yml b/etc/kayobe/ansible/stackhpc-openstack-tests.yml index b99b9f91d..4e9bf0cff 100644 --- a/etc/kayobe/ansible/stackhpc-openstack-tests.yml +++ b/etc/kayobe/ansible/stackhpc-openstack-tests.yml @@ -5,12 +5,13 @@ - stackhpc-openstack-tests vars: sot_venv: "{{ virtualenv_path }}/sot-venv" - sot_repo: "https://github.com/stackhpc/stackhpc-openstack-tests" - sot_version: "v0.0.1" + sot_repo: https://github.com/stackhpc/stackhpc-openstack-tests + sot_version: v0.0.1 sot_timeout: 30 results_path_local: "{{ lookup('env', 'HOME') }}/sot-results" tasks: - - block: + - name: Stackhpc OpenStack tests + block: - name: Create a temporary directory for tests repo ansible.builtin.tempfile: state: directory @@ -31,12 +32,12 @@ depth: 1 single_branch: true - - name: Ensure the latest versions of pip and setuptools are installed # noqa package-latest + - name: Ensure the latest versions of pip and setuptools are installed # noqa package-latest ansible.builtin.pip: name: "{{ item.name }}" state: latest virtualenv: "{{ sot_venv }}" - virtualenv_command: "python3 -m venv" + virtualenv_command: python3 -m venv with_items: - { name: pip } - { name: setuptools } @@ -45,7 +46,7 @@ ansible.builtin.pip: name: - "{{ repo_tmpdir.path }}" - - "-r{{ repo_tmpdir.path }}/requirements.txt" + - -r{{ repo_tmpdir.path }}/requirements.txt - pytest-html - pytest-timeout virtualenv: "{{ sot_venv }}" @@ -78,7 +79,7 @@ sot_opensearch_port: 9200 sot_opensearch_tls: false sot_prometheus_url: "{{ kolla_internal_scheme }}://{{ kolla_internal_fqdn }}:9091" - sot_prometheus_username: "admin" + sot_prometheus_username: admin sot_prometheus_password: "{{ kolla_passwords.prometheus_password }}" always: - name: Fetch results diff --git a/etc/kayobe/ansible/stop-openstack-services.yml b/etc/kayobe/ansible/stop-openstack-services.yml index 35f26c044..f7d438094 100644 --- a/etc/kayobe/ansible/stop-openstack-services.yml +++ b/etc/kayobe/ansible/stop-openstack-services.yml @@ -6,24 +6,25 @@ become: true gather_facts: false vars: - - stop_service_list: - - "blazar" - - "barbican" - - "cinder" - - "cloudkitty" - - "designate" - - "glance" - - "heat" - - "horizon" - - "ironic" - - "keystone" - - "magnum" - - "manila" - - "neutron" - - "nova" - - "octavia" - - "placement" + stop_service_list: + - blazar + - barbican + - cinder + - cloudkitty + - designate + - glance + - heat + - horizon + - ironic + - keystone + - magnum + - manila + - neutron + - nova + - octavia + - placement tasks: - name: Stop OpenStack services - shell: >- + ansible.builtin.shell: >- + set -o pipefail && systemctl -a | egrep '({{ stop_service_list | join('|') }})' | awk '{ print $1 }' | xargs systemctl stop diff --git a/etc/kayobe/ansible/ubuntu-upgrade.yml b/etc/kayobe/ansible/ubuntu-upgrade.yml index b7cfe7338..8741b8cf8 100644 --- a/etc/kayobe/ansible/ubuntu-upgrade.yml +++ 
b/etc/kayobe/ansible/ubuntu-upgrade.yml @@ -8,7 +8,7 @@ reboot_timeout_s: "{{ 20 * 60 }}" tasks: - name: Assert that hosts are running Ubuntu Focal - assert: + ansible.builtin.assert: that: - ansible_facts.distribution == 'Ubuntu' - ansible_facts.distribution_major_version == '20' @@ -20,41 +20,41 @@ os_distribution is set to ubuntu. - name: Ensure apt packages are up to date - apt: + ansible.builtin.apt: update_cache: true - upgrade: yes + upgrade: true become: true - name: Ensure do-release-upgrade is installed - package: + ansible.builtin.package: name: ubuntu-release-upgrader-core state: latest become: true - name: Check whether a reboot is required - stat: + ansible.builtin.stat: path: /var/run/reboot-required register: file_status - name: Reboot to apply updates - reboot: + ansible.builtin.reboot: reboot_timeout: "{{ reboot_timeout_s }}" connect_timeout: 600 search_paths: # Systems running molly-guard hang waiting for confirmation before rebooting without this. - - "/lib/molly-guard" + - /lib/molly-guard # Default list: - - "/sbin" - - "/bin" - - "/usr/sbin" - - "/usr/bin" - - "/usr/local/sbin" + - /sbin + - /bin + - /usr/sbin + - /usr/bin + - /usr/local/sbin become: true when: file_status.stat.exists # NOTE: We cannot use apt_repository here because definitions must exist within the standard repos.list - name: Ensure Jammy repo definitions exist in sources.list - blockinfile: + ansible.builtin.blockinfile: path: /etc/apt/sources.list block: | deb {{ stackhpc_repo_ubuntu_jammy_url }} jammy main restricted universe multiverse @@ -64,12 +64,12 @@ become: true - name: Do release upgrade - command: do-release-upgrade -f DistUpgradeViewNonInteractive + ansible.builtin.command: do-release-upgrade -f DistUpgradeViewNonInteractive become: true - name: Ensure old venvs do not exist - file: - path: "/opt/kayobe/venvs/{{ item }}" + ansible.builtin.file: + path: /opt/kayobe/venvs/{{ item }} state: absent loop: - kayobe @@ -94,31 +94,31 @@ reboot_timeout_s: "{{ 20 * 60 }}" tasks: - name: Ensure Jammy repo definitions do not exist in sources.list - blockinfile: + ansible.builtin.blockinfile: path: /etc/apt/sources.list state: absent become: true - name: Ensure Kolla Ansible Docker repo definition does not exist - file: + ansible.builtin.file: path: /etc/apt/sources.list.d/docker.list state: absent become: true when: apt_repositories | selectattr('url', 'match', '.*docker-ce.*') | list | length > 0 - name: Reboot and wait - reboot: + ansible.builtin.reboot: reboot_timeout: "{{ reboot_timeout_s }}" connect_timeout: 600 search_paths: # Systems running molly-guard hang waiting for confirmation before rebooting without this. 
- - "/lib/molly-guard" + - /lib/molly-guard # Default list: - - "/sbin" - - "/bin" - - "/usr/sbin" - - "/usr/bin" - - "/usr/local/sbin" + - /sbin + - /bin + - /usr/sbin + - /usr/bin + - /usr/local/sbin become: true - name: Update distribution facts @@ -127,7 +127,7 @@ gather_subset: "{{ kayobe_ansible_setup_gather_subset }}" - name: Assert that hosts are now using Ubuntu 22 - assert: + ansible.builtin.assert: that: - ansible_facts.distribution_major_version == '22' - ansible_facts.distribution_release == 'jammy' diff --git a/etc/kayobe/ansible/vault-deploy-barbican.yml b/etc/kayobe/ansible/vault-deploy-barbican.yml index 98b0eabe7..0270dc8d0 100644 --- a/etc/kayobe/ansible/vault-deploy-barbican.yml +++ b/etc/kayobe/ansible/vault-deploy-barbican.yml @@ -1,32 +1,33 @@ --- - name: Configure AppRole - any_errors_fatal: True - gather_facts: True + any_errors_fatal: true + gather_facts: true hosts: controllers[0] vars: - vault_api_addr: "https://{{ internal_net_name | net_ip }}:8200" + vault_api_addr: https://{{ internal_net_name | net_ip }}:8200 vault_ca_cert: "{{ '/etc/pki/tls/certs/ca-bundle.crt' if ansible_facts.os_family == 'RedHat' else '/usr/local/share/ca-certificates/OS-TLS-ROOT.crt' }}" tasks: - name: Assert that secrets_barbican_approle_secret_id is defined - assert: + ansible.builtin.assert: that: - secrets_barbican_approle_secret_id is defined - fail_msg: "Please define secrets_barbican_approle_secret_id in your secrets.yml" + fail_msg: Please define secrets_barbican_approle_secret_id in your secrets.yml - name: Include Vault keys - include_vars: + ansible.builtin.include_vars: file: "{{ kayobe_env_config_path }}/vault/overcloud-vault-keys.json" name: vault_keys - name: Ensure hvac is installed - pip: + ansible.builtin.pip: name: hvac state: present extra_args: "{% if pip_upper_constraints_file %}-c {{ pip_upper_constraints_file }}{% endif %}" virtualenv: "{{ virtualenv_path }}/kayobe" - - environment: - https_proxy: '' + - name: Ensure AppRole is configured + environment: + https_proxy: "" block: - name: Enable AppRole auth module hashivault_auth_method: @@ -43,14 +44,14 @@ token: "{{ vault_keys.root_token }}" name: barbican backend: kv - description: "Barbican kv store" + description: Barbican kv store - name: Ensure barbican policy is defined hashivault_policy: url: "{{ vault_api_addr }}" ca_cert: "{{ vault_ca_cert }}" token: "{{ vault_keys.root_token }}" - name: "barbican-policy" + name: barbican-policy state: present rules: | path "barbican/*" { @@ -77,12 +78,12 @@ register: barbican_role_id - name: Print barbican Approle ID - debug: - msg: "barbican role id is {{ barbican_role_id.id }}" + ansible.builtin.debug: + msg: barbican role id is {{ barbican_role_id.id }} - name: Write barbican Approle ID to file if requested delegate_to: localhost - copy: + ansible.builtin.copy: content: "{{ barbican_role_id.id }}" dest: "{{ stackhpc_barbican_role_id_file_path | default('~/barbican-role-id') }}" when: stackhpc_write_barbican_role_id_to_file | default(false) | bool diff --git a/etc/kayobe/ansible/vault-deploy-overcloud.yml b/etc/kayobe/ansible/vault-deploy-overcloud.yml index d5e63b83d..fc87b9e5b 100644 --- a/etc/kayobe/ansible/vault-deploy-overcloud.yml +++ b/etc/kayobe/ansible/vault-deploy-overcloud.yml @@ -5,16 +5,16 @@ hosts: controllers tasks: - name: Copy the intermediate CA - copy: + ansible.builtin.copy: src: "{{ kayobe_env_config_path }}/vault/OS-TLS-ROOT.pem" - dest: "{{ '/etc/pki/ca-trust/source/anchors/OS-TLS-ROOT.crt' if ansible_facts.os_family == 'RedHat' else 
'/usr/local/share/ca-certificates/OS-TLS-ROOT.crt' }}" - mode: 0644 + dest: "{{ '/etc/pki/ca-trust/source/anchors/OS-TLS-ROOT.crt' if ansible_facts.os_family == 'RedHat' else '/usr/local/share/ca-certificates/OS-TLS-ROOT.crt' + }}" + mode: "0644" become: true - - name: update system CA + - name: Update system CA become: true - shell: "{{ 'update-ca-trust' if ansible_facts.os_family == 'RedHat' else 'update-ca-certificates' }}" - + ansible.builtin.command: "{{ 'update-ca-trust' if ansible_facts.os_family == 'RedHat' else 'update-ca-certificates' }}" - name: Deploy HashiCorp Vault on the overcloud any_errors_fatal: true gather_facts: true @@ -24,7 +24,7 @@ vault_bind_address: "{{ internal_net_name | net_ip }}" tasks: - name: Set a fact about the virtualenv on the remote system - set_fact: + ansible.builtin.set_fact: virtualenv: "{{ ansible_python_interpreter | dirname | dirname }}" when: - ansible_python_interpreter is defined @@ -32,7 +32,7 @@ - not ansible_python_interpreter.startswith('/usr/bin/') - name: Ensure Python hvac module is installed - pip: + ansible.builtin.pip: name: hvac state: latest extra_args: "{% if pip_upper_constraints_file %}-c {{ pip_upper_constraints_file }}{% endif %}" @@ -40,24 +40,25 @@ become: "{{ virtualenv is not defined }}" - name: Ensure /opt/kayobe/vault exists - file: + ansible.builtin.file: path: /opt/kayobe/vault state: directory - name: Template out TLS key and cert - copy: + ansible.builtin.copy: # Within the Hashicorp Vault container these uids & gids map to the vault user src: "{{ kayobe_env_config_path }}/vault/{{ item }}" - dest: "/opt/kayobe/vault/{{ item }}" + dest: /opt/kayobe/vault/{{ item }} owner: 100 group: 1000 - mode: 0600 + mode: "0600" loop: - "{% if kolla_internal_fqdn != kolla_internal_vip_address %}{{ kolla_internal_fqdn }}{% else %}overcloud{% endif %}.crt" - "{% if kolla_internal_fqdn != kolla_internal_vip_address %}{{ kolla_internal_fqdn }}{% else %}overcloud{% endif %}.key" - become: True + become: true - - import_role: + - name: Apply vault role + ansible.builtin.import_role: name: stackhpc.hashicorp.vault vars: hashicorp_registry_url: "{{ overcloud_hashicorp_registry_url }}" @@ -65,51 +66,54 @@ hashicorp_registry_password: "{{ overcloud_hashicorp_registry_password }}" consul_docker_image: "{{ overcloud_consul_docker_image }}" consul_docker_tag: "{{ overcloud_consul_docker_tag }}" - vault_config_dir: "/opt/kayobe/vault" - vault_cluster_name: "overcloud" + vault_config_dir: /opt/kayobe/vault + vault_cluster_name: overcloud vault_ca_cert: "{{ '/etc/pki/tls/certs/ca-bundle.crt' if ansible_facts.os_family == 'RedHat' else '/usr/local/share/ca-certificates/OS-TLS-ROOT.crt' }}" vault_docker_image: "{{ overcloud_vault_docker_image }}" vault_docker_tag: "{{ overcloud_vault_docker_tag }}" vault_tls_cert: "{% if kolla_internal_fqdn != kolla_internal_vip_address %}{{ kolla_internal_fqdn }}{% else %}overcloud{% endif %}.crt" vault_tls_key: "{% if kolla_internal_fqdn != kolla_internal_vip_address %}{{ kolla_internal_fqdn }}{% else %}overcloud{% endif %}.key" copy_self_signed_ca: true - vault_api_addr: "https://{{ internal_net_name | net_ip }}:8200" + vault_api_addr: https://{{ internal_net_name | net_ip }}:8200 vault_write_keys_file: true vault_write_keys_file_path: "{{ kayobe_env_config_path }}/vault/overcloud-vault-keys.json" - name: Include Vault keys - include_vars: + ansible.builtin.include_vars: file: "{{ kayobe_env_config_path }}/vault/overcloud-vault-keys.json" name: vault_keys - - import_role: + - name: Import vault unseal role + 
ansible.builtin.import_role: name: stackhpc.hashicorp.vault_unseal vars: - vault_api_addr: "https://{{ internal_net_name | net_ip }}:8200" + vault_api_addr: https://{{ internal_net_name | net_ip }}:8200 vault_unseal_token: "{{ vault_keys.root_token }}" - vault_unseal_ca_cert: "{{ '/etc/pki/tls/certs/ca-bundle.crt' if ansible_facts.os_family == 'RedHat' else '/usr/local/share/ca-certificates/OS-TLS-ROOT.crt' }}" + vault_unseal_ca_cert: "{{ '/etc/pki/tls/certs/ca-bundle.crt' if ansible_facts.os_family == 'RedHat' \ + else '/usr/local/share/ca-certificates/OS-TLS-ROOT.crt' }}" vault_unseal_keys: "{{ vault_keys.keys_base64 }}" environment: - https_proxy: '' + https_proxy: "" - name: Configure PKI any_errors_fatal: true gather_facts: true hosts: controllers[0] tasks: - - import_role: + - name: Import vault pki role + ansible.builtin.import_role: name: stackhpc.hashicorp.vault_pki vars: vault_token: "{{ vault_keys.root_token }}" - vault_api_addr: "https://{{ internal_net_name | net_ip }}:8200" + vault_api_addr: https://{{ internal_net_name | net_ip }}:8200 vault_ca_cert: "{{ '/etc/pki/tls/certs/ca-bundle.crt' if ansible_facts.os_family == 'RedHat' else '/usr/local/share/ca-certificates/OS-TLS-ROOT.crt' }}" vault_pki_root_create: false vault_pki_intermediate_import: true - vault_pki_intermediate_ca_name: "OS-TLS-INT" + vault_pki_intermediate_ca_name: OS-TLS-INT vault_pki_intermediate_ca_bundle: "{{ lookup('file', kayobe_env_config_path + '/vault/OS-TLS-INT.pem') }}" vault_pki_intermediate_ca_cert: "{{ lookup('file', kayobe_env_config_path + '/vault/OS-TLS-INT.crt') }}" vault_pki_intermediate_roles: "{{ overcloud_vault_pki_roles }}" vault_pki_write_certificate_files: true vault_pki_certificates_directory: "{{ kayobe_env_config_path }}/vault" environment: - https_proxy: '' + https_proxy: "" diff --git a/etc/kayobe/ansible/vault-deploy-seed.yml b/etc/kayobe/ansible/vault-deploy-seed.yml index e0918d421..a74557f63 100644 --- a/etc/kayobe/ansible/vault-deploy-seed.yml +++ b/etc/kayobe/ansible/vault-deploy-seed.yml @@ -1,15 +1,15 @@ --- - name: Deploy Hashicorp Vault on the seed - any_errors_fatal: True - gather_facts: True + any_errors_fatal: true + gather_facts: true hosts: seed vars: - consul_bind_interface: "lo" + consul_bind_interface: lo vault_bind_address: "{{ ansible_facts[consul_bind_interface].ipv4.address }}" - vault_api_addr: "http://{{ vault_bind_address }}:8200" + vault_api_addr: http://{{ vault_bind_address }}:8200 tasks: - name: Set a fact about the virtualenv on the remote system - set_fact: + ansible.builtin.set_fact: virtualenv: "{{ ansible_python_interpreter | dirname | dirname }}" when: - ansible_python_interpreter is defined @@ -17,7 +17,7 @@ - not ansible_python_interpreter.startswith('/usr/bin/') - name: Ensure Python PyYAML and hvac modules are installed - pip: + ansible.builtin.pip: name: - PyYAML - hvac @@ -27,13 +27,14 @@ become: "{{ virtualenv is not defined }}" - name: Ensure vault directory exists in Kayobe configuration - file: + ansible.builtin.file: path: "{{ kayobe_env_config_path }}/vault/" state: directory delegate_to: localhost run_once: true - - import_role: + - name: Apply vault role + ansible.builtin.import_role: name: stackhpc.hashicorp.vault vars: hashicorp_registry_url: "{{ seed_hashicorp_registry_url }}" @@ -41,30 +42,32 @@ hashicorp_registry_password: "{{ seed_hashicorp_registry_password }}" consul_docker_image: "{{ seed_consul_docker_image }}" consul_docker_tag: "{{ seed_consul_docker_tag }}" - vault_config_dir: "/opt/kayobe/vault" - vault_cluster_name: 
"seed" + vault_config_dir: /opt/kayobe/vault + vault_cluster_name: seed vault_docker_image: "{{ seed_vault_docker_image }}" vault_docker_tag: "{{ seed_vault_docker_tag }}" vault_write_keys_file: true vault_write_keys_file_path: "{{ kayobe_env_config_path }}/vault/seed-vault-keys.json" - name: Include Vault keys - include_vars: + ansible.builtin.include_vars: file: "{{ kayobe_env_config_path }}/vault/seed-vault-keys.json" name: vault_keys - - import_role: + - name: Apply vault unseal role + ansible.builtin.import_role: name: stackhpc.hashicorp.vault_unseal vars: vault_unseal_keys: "{{ vault_keys.keys_base64 }}" - - import_role: + - name: Apply vault PKI role + ansible.builtin.import_role: name: stackhpc.hashicorp.vault_pki vars: vault_token: "{{ vault_keys.root_token }}" - vault_pki_root_ca_name: "OS-TLS-ROOT" + vault_pki_root_ca_name: OS-TLS-ROOT vault_pki_write_root_ca_to_file: true - vault_pki_intermediate_ca_name: "OS-TLS-INT" + vault_pki_intermediate_ca_name: OS-TLS-INT vault_pki_intermediate_export: true vault_pki_intermediate_roles: "{{ seed_vault_pki_roles }}" vault_pki_certificates_directory: "{{ kayobe_env_config_path }}/vault" @@ -74,7 +77,8 @@ - common_name: "{% if kolla_internal_fqdn != kolla_internal_vip_address %}{{ kolla_internal_fqdn }}{% else %}overcloud{% endif %}" role: "{{ seed_vault_pki_role_name }}" extra_params: - ip_sans: "{% for host in groups['controllers'] %}{{ internal_net_name | net_ip(host) }}{% if not loop.last %},{% endif %}{% endfor %},{{ kolla_internal_vip_address }}" + ip_sans: "{% for host in groups['controllers'] %}{{ internal_net_name | net_ip(host) }}\ + {% if not loop.last %},{% endif %}{% endfor %},{{ kolla_internal_vip_address }}" vault_pki_write_certificate_files: true vault_pki_write_pem_bundle: false vault_pki_write_int_ca_to_file: true diff --git a/etc/kayobe/ansible/vault-generate-backend-tls.yml b/etc/kayobe/ansible/vault-generate-backend-tls.yml index 5603f1991..71f243c85 100644 --- a/etc/kayobe/ansible/vault-generate-backend-tls.yml +++ b/etc/kayobe/ansible/vault-generate-backend-tls.yml @@ -5,24 +5,24 @@ hosts: controllers:network tasks: - name: Copy the intermediate CA - copy: + ansible.builtin.copy: src: "{{ kayobe_env_config_path }}/vault/OS-TLS-ROOT.pem" - dest: "{{ '/etc/pki/ca-trust/source/anchors/OS-TLS-ROOT.crt' if ansible_facts.os_family == 'RedHat' else '/usr/local/share/ca-certificates/OS-TLS-ROOT.crt' }}" - mode: 0644 + dest: "{{ '/etc/pki/ca-trust/source/anchors/OS-TLS-ROOT.crt' if ansible_facts.os_family == 'RedHat' \ + else '/usr/local/share/ca-certificates/OS-TLS-ROOT.crt' }}" + mode: "0644" become: true - - name: update system CA + - name: Update system CA become: true - shell: "{{ 'update-ca-trust' if ansible_facts.os_family == 'RedHat' else 'update-ca-certificates' }}" - + ansible.builtin.command: "{{ 'update-ca-trust' if ansible_facts.os_family == 'RedHat' else 'update-ca-certificates' }}" - name: Generate backend API certificates hosts: controllers:network vars: - vault_api_addr: "https://{{ internal_net_name | net_ip(groups['controllers'][0]) }}:8200" - vault_intermediate_ca_name: "OS-TLS-INT" + vault_api_addr: https://{{ internal_net_name | net_ip(groups['controllers'][0]) }}:8200 + vault_intermediate_ca_name: OS-TLS-INT tasks: - name: Set a fact about the virtualenv on the remote system - set_fact: + ansible.builtin.set_fact: virtualenv: "{{ ansible_python_interpreter | dirname | dirname }}" when: - ansible_python_interpreter is defined @@ -30,7 +30,7 @@ - not ansible_python_interpreter.startswith('/usr/bin/') - 
name: Ensure Python hvac module is installed - pip: + ansible.builtin.pip: name: hvac state: latest extra_args: "{% if pip_upper_constraints_file %}-c {{ pip_upper_constraints_file }}{% endif %}" @@ -38,7 +38,7 @@ become: "{{ virtualenv is not defined }}" - name: Include Vault keys - include_vars: + ansible.builtin.include_vars: file: "{{ kayobe_env_config_path }}/vault/overcloud-vault-keys.json" name: vault_keys @@ -54,29 +54,29 @@ ip_sans: "{{ internal_net_name | net_ip }}" register: backend_cert environment: - https_proxy: '' + https_proxy: "" - name: Ensure certificates directory exists - file: + ansible.builtin.file: path: "{{ kayobe_env_config_path }}/kolla/certificates" state: directory delegate_to: localhost - name: Copy backend cert no_log: true - copy: + ansible.builtin.copy: dest: "{{ kayobe_env_config_path }}/kolla/certificates/{{ inventory_hostname }}-cert.pem" content: | {{ backend_cert.data.certificate }} {{ backend_cert.data.issuing_ca }} - mode: 0600 + mode: "0600" delegate_to: localhost - name: Copy backend key no_log: true - copy: + ansible.builtin.copy: dest: "{{ kayobe_env_config_path }}/kolla/certificates/{{ inventory_hostname }}-key.pem" content: | {{ backend_cert.data.private_key }} - mode: 0600 + mode: "0600" delegate_to: localhost diff --git a/etc/kayobe/ansible/vault-generate-internal-tls.yml b/etc/kayobe/ansible/vault-generate-internal-tls.yml index a1dc2303b..d5d4e6068 100644 --- a/etc/kayobe/ansible/vault-generate-internal-tls.yml +++ b/etc/kayobe/ansible/vault-generate-internal-tls.yml @@ -3,11 +3,11 @@ hosts: controllers run_once: true vars: - vault_api_addr: "https://{{ internal_net_name | net_ip }}:8200" - vault_intermediate_ca_name: "OS-TLS-INT" + vault_api_addr: https://{{ internal_net_name | net_ip }}:8200 + vault_intermediate_ca_name: OS-TLS-INT tasks: - name: Include Vault keys - include_vars: + ansible.builtin.include_vars: file: "{{ kayobe_env_config_path }}/vault/overcloud-vault-keys.json" name: vault_keys @@ -23,34 +23,34 @@ ip_sans: "{{ kolla_internal_vip_address }}" register: internal_cert environment: - https_proxy: '' + https_proxy: "" - name: Ensure certificates directory exists - file: + ansible.builtin.file: path: "{{ kayobe_env_config_path }}/kolla/certificates" state: directory delegate_to: localhost - name: Ensure CA certificates directory exists - file: + ansible.builtin.file: path: "{{ kayobe_env_config_path }}/kolla/certificates/ca" state: directory delegate_to: localhost - name: Copy internal API PEM bundle no_log: true - copy: + ansible.builtin.copy: dest: "{{ kayobe_env_config_path }}/kolla/certificates/haproxy-internal.pem" content: | {{ internal_cert.data.certificate }} {{ internal_cert.data.issuing_ca }} {{ internal_cert.data.private_key }} - mode: 0600 + mode: "0600" delegate_to: localhost - name: Copy root CA - copy: + ansible.builtin.copy: src: "{{ kayobe_env_config_path }}/vault/OS-TLS-ROOT.pem" dest: "{{ kayobe_env_config_path }}/kolla/certificates/ca/vault.crt" - mode: 0600 + mode: "0600" delegate_to: localhost diff --git a/etc/kayobe/ansible/vault-generate-test-external-tls.yml b/etc/kayobe/ansible/vault-generate-test-external-tls.yml index 1dc40d443..37841ad0d 100644 --- a/etc/kayobe/ansible/vault-generate-test-external-tls.yml +++ b/etc/kayobe/ansible/vault-generate-test-external-tls.yml @@ -3,12 +3,12 @@ hosts: controllers run_once: true vars: - vault_api_addr: "https://{{ internal_net_name | net_ip }}:8200" + vault_api_addr: https://{{ internal_net_name | net_ip }}:8200 # NOTE: Using the same CA as internal TLS. 
- vault_intermediate_ca_name: "OS-TLS-INT" + vault_intermediate_ca_name: OS-TLS-INT tasks: - name: Include Vault keys - include_vars: + ansible.builtin.include_vars: file: "{{ kayobe_env_config_path }}/vault/overcloud-vault-keys.json" name: vault_keys @@ -24,34 +24,34 @@ ip_sans: "{{ kolla_external_vip_address }}" register: external_cert environment: - https_proxy: '' + https_proxy: "" - name: Ensure certificates directory exists - file: + ansible.builtin.file: path: "{{ kayobe_env_config_path }}/kolla/certificates" state: directory delegate_to: localhost - name: Ensure CA certificates directory exists - file: + ansible.builtin.file: path: "{{ kayobe_env_config_path }}/kolla/certificates/ca" state: directory delegate_to: localhost - name: Copy external API PEM bundle no_log: true - copy: + ansible.builtin.copy: dest: "{{ kayobe_env_config_path }}/kolla/certificates/haproxy.pem" content: | {{ external_cert.data.certificate }} {{ external_cert.data.issuing_ca }} {{ external_cert.data.private_key }} - mode: 0600 + mode: "0600" delegate_to: localhost - name: Copy root CA - copy: + ansible.builtin.copy: src: "{{ kayobe_env_config_path }}/vault/OS-TLS-ROOT.pem" dest: "{{ kayobe_env_config_path }}/kolla/certificates/ca/vault.crt" - mode: 0600 + mode: "0600" delegate_to: localhost diff --git a/etc/kayobe/ansible/vault-unseal-overcloud.yml b/etc/kayobe/ansible/vault-unseal-overcloud.yml index 0d6730a66..11cc600d9 100644 --- a/etc/kayobe/ansible/vault-unseal-overcloud.yml +++ b/etc/kayobe/ansible/vault-unseal-overcloud.yml @@ -5,7 +5,7 @@ hosts: controllers tasks: - name: Set a fact about the virtualenv on the remote system - set_fact: + ansible.builtin.set_fact: virtualenv: "{{ ansible_python_interpreter | dirname | dirname }}" when: - ansible_python_interpreter is defined @@ -13,7 +13,7 @@ - not ansible_python_interpreter.startswith('/usr/bin/') - name: Ensure Python hvac module is installed - pip: + ansible.builtin.pip: name: hvac state: latest extra_args: "{% if pip_upper_constraints_file %}-c {{ pip_upper_constraints_file }}{% endif %}" @@ -21,16 +21,18 @@ become: "{{ virtualenv is not defined }}" - name: Include Vault keys - include_vars: + ansible.builtin.include_vars: file: "{{ kayobe_env_config_path }}/vault/overcloud-vault-keys.json" name: vault_keys - - import_role: + - name: Apply vault unseal role + ansible.builtin.import_role: name: stackhpc.hashicorp.vault_unseal vars: - vault_api_addr: "https://{{ internal_net_name | net_ip }}:8200" + vault_api_addr: https://{{ internal_net_name | net_ip }}:8200 vault_unseal_token: "{{ vault_keys.root_token }}" - vault_unseal_ca_cert: "{{ '/etc/pki/tls/certs/ca-bundle.crt' if ansible_facts.os_family == 'RedHat' else '/usr/local/share/ca-certificates/OS-TLS-ROOT.crt' }}" + vault_unseal_ca_cert: "{{ '/etc/pki/tls/certs/ca-bundle.crt' if ansible_facts.os_family == 'RedHat' \ + else '/usr/local/share/ca-certificates/OS-TLS-ROOT.crt' }}" vault_unseal_keys: "{{ vault_keys.keys_base64 }}" environment: - https_proxy: '' + https_proxy: "" diff --git a/etc/kayobe/ansible/vault-unseal-seed.yml b/etc/kayobe/ansible/vault-unseal-seed.yml index 38c5ea737..3c6428b44 100644 --- a/etc/kayobe/ansible/vault-unseal-seed.yml +++ b/etc/kayobe/ansible/vault-unseal-seed.yml @@ -1,13 +1,13 @@ --- - name: Deploy Hashicorp Vault on the seed - any_errors_fatal: True - gather_facts: True + any_errors_fatal: true + gather_facts: true hosts: seed vars: - vault_api_addr: "http://127.0.0.1:8200" + vault_api_addr: http://127.0.0.1:8200 tasks: - name: Set a fact about the virtualenv 
on the remote system - set_fact: + ansible.builtin.set_fact: virtualenv: "{{ ansible_python_interpreter | dirname | dirname }}" when: - ansible_python_interpreter is defined @@ -15,7 +15,7 @@ - not ansible_python_interpreter.startswith('/usr/bin/') - name: Ensure Python hvac module is installed - pip: + ansible.builtin.pip: name: hvac state: latest extra_args: "{% if pip_upper_constraints_file %}-c {{ pip_upper_constraints_file }}{% endif %}" @@ -23,11 +23,12 @@ become: "{{ virtualenv is not defined }}" - name: Include Vault keys - include_vars: + ansible.builtin.include_vars: file: "{{ kayobe_env_config_path }}/vault/seed-vault-keys.json" name: vault_keys - - import_role: + - name: Apply vault unseal role + ansible.builtin.import_role: name: stackhpc.hashicorp.vault_unseal vars: vault_unseal_keys: "{{ vault_keys.keys_base64 }}" diff --git a/etc/kayobe/ansible/wazuh-agent.yml b/etc/kayobe/ansible/wazuh-agent.yml index 730828604..489191262 100644 --- a/etc/kayobe/ansible/wazuh-agent.yml +++ b/etc/kayobe/ansible/wazuh-agent.yml @@ -1,29 +1,30 @@ --- - name: Deploy Wazuh agent hosts: wazuh-agent - become: yes + become: true tasks: - - import_role: - name: "wazuh-ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-agent" + - name: Apply Wazuh agent role + ansible.builtin.import_role: + name: wazuh-ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-agent post_tasks: - name: Check if custom SCA policies directory exists - stat: + ansible.builtin.stat: path: "{{ local_custom_sca_policies_path }}" register: custom_sca_policies_folder delegate_to: localhost - name: Gather list of custom SCA policies - find: + ansible.builtin.find: paths: "{{ local_custom_sca_policies_path }}" - patterns: '*.yml' + patterns: "*.yml" delegate_to: localhost register: custom_sca_policies when: custom_sca_policies_folder.stat.exists - name: Allow Wazuh agents to execute commands in SCA policies sent from the Wazuh manager - become: yes - blockinfile: - path: "/var/ossec/etc/local_internal_options.conf" + become: true + ansible.builtin.blockinfile: + path: /var/ossec/etc/local_internal_options.conf state: present owner: wazuh group: wazuh @@ -36,6 +37,6 @@ handlers: - name: Restart wazuh-agent - service: + ansible.builtin.service: name: wazuh-agent state: restarted diff --git a/etc/kayobe/ansible/wazuh-manager.yml b/etc/kayobe/ansible/wazuh-manager.yml index b75ed261a..404a96970 100644 --- a/etc/kayobe/ansible/wazuh-manager.yml +++ b/etc/kayobe/ansible/wazuh-manager.yml @@ -5,9 +5,14 @@ tags: - prechecks tasks: - - block: + - name: Generate certificates + when: + - groups["wazuh-manager"] | length > 0 + + # Certificates generation + block: - name: Fail if using old path for Wazuh certificates - fail: + ansible.builtin.fail: msg: >- The path used for Wazuh SSL certificates was changed in a previous release. The certificates were found in the wrong location. Please move all files and directories in @@ -17,7 +22,7 @@ - (playbook_dir ~ '/wazuh/certificates') is exists - name: Fail if using old path for custom certificates - fail: + ansible.builtin.fail: msg: >- Wazuh custom SSL certificates have been merged with regular certificates. The certificates were found in the wrong location. 
Please move them from {{ playbook_dir }}/wazuh/custom_certificates @@ -26,52 +31,54 @@ - (playbook_dir ~ '/wazuh/custom_certificates') is exists - name: Check that removed variable, local_custom_certs_path, is not set - assert: + ansible.builtin.assert: that: local_custom_certs_path is not defined - fail_msg: "The variable, `local_custom_certs_path`, is no longer used. Please remove this variable." - when: - - groups["wazuh-manager"] | length > 0 - -# Certificates generation -- hosts: localhost + fail_msg: The variable, `local_custom_certs_path`, is no longer used. Please remove this variable. +- name: Apply wazuh indexer role + hosts: localhost roles: - - role: "wazuh-ansible/wazuh-ansible/roles/wazuh/wazuh-indexer" + - role: wazuh-ansible/wazuh-ansible/roles/wazuh/wazuh-indexer perform_installation: false - become: no + become: false tags: - generate-certs # Single node -- hosts: wazuh-manager - become: yes +- name: Install Wazuh manager + hosts: wazuh-manager + become: true become_user: root roles: - - role: "wazuh-ansible/wazuh-ansible/roles/wazuh/wazuh-indexer" - - role: "wazuh-ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager" - - role: "wazuh-ansible/wazuh-ansible/roles/wazuh/ansible-filebeat-oss" - - role: "wazuh-ansible/wazuh-ansible/roles/wazuh/wazuh-dashboard" + - role: wazuh-ansible/wazuh-ansible/roles/wazuh/wazuh-indexer + - role: wazuh-ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager + - role: wazuh-ansible/wazuh-ansible/roles/wazuh/ansible-filebeat-oss + - role: wazuh-ansible/wazuh-ansible/roles/wazuh/wazuh-dashboard post_tasks: - - block: + - name: Use custom SCA policies + notify: + - Restart wazuh + + block: - name: Check if custom SCA policies directory exists - stat: + ansible.builtin.stat: path: "{{ local_custom_sca_policies_path }}" register: custom_sca_policies_folder delegate_to: localhost - become: no + become: false - name: Gather list of custom SCA policies - find: + ansible.builtin.find: paths: "{{ local_custom_sca_policies_path }}" - patterns: '*.yml' + patterns: "*.yml" delegate_to: localhost register: custom_sca_policies when: custom_sca_policies_folder.stat.exists - become: no + become: false - name: Copy custom SCA policy files to Wazuh manager - copy: + ansible.builtin.copy: # Note the trailing slash to copy directory contents src: "{{ local_custom_sca_policies_path }}/" - dest: "/var/ossec/etc/shared/default/" + dest: /var/ossec/etc/shared/default/ owner: wazuh group: wazuh when: @@ -79,13 +86,13 @@ - custom_sca_policies.files | length > 0 - name: Add custom policy definition(s) to the shared Agent config - blockinfile: - path: "/var/ossec/etc/shared/default/agent.conf" + ansible.builtin.blockinfile: + path: /var/ossec/etc/shared/default/agent.conf state: present owner: wazuh group: wazuh marker: "{mark} ANSIBLE MANAGED BLOCK Custom SCA Policies" - insertafter: "" + insertafter: block: | {% filter indent(width=2, first=true) %} @@ -99,30 +106,27 @@ when: - custom_sca_policies_folder.stat.exists - custom_sca_policies.files | length > 0 - notify: - - Restart wazuh - - name: Set http/s_proxy vars in ossec-init.conf for vulnerability detector - blockinfile: - path: "/var/ossec/etc/ossec.conf" + ansible.builtin.blockinfile: + path: /var/ossec/etc/ossec.conf state: present owner: root group: ossec block: | HTTPS_PROXY={{ http_proxy_url }} HTTP_PROXY={{ http_proxy_url }} - backup: yes + backup: true when: http_proxy_url is defined notify: - Restart wazuh - name: Perform health check against filebeat - command: filebeat test output + 
ansible.builtin.command: filebeat test output changed_when: false retries: 2 handlers: - name: Restart wazuh - service: + ansible.builtin.service: name: wazuh-manager state: restarted diff --git a/etc/kayobe/ansible/wazuh-secrets.yml b/etc/kayobe/ansible/wazuh-secrets.yml index 16b0a09f7..fbaa3d3e9 100644 --- a/etc/kayobe/ansible/wazuh-secrets.yml +++ b/etc/kayobe/ansible/wazuh-secrets.yml @@ -1,28 +1,29 @@ --- -- hosts: localhost +- name: Wazuh secrets + hosts: localhost gather_facts: false vars: wazuh_secrets_path: "{{ kayobe_env_config_path }}/wazuh-secrets.yml" override_special_characters: '"#$%&()*+,-./:;<=>?@[\]^_{|}~' tasks: - - name: install passlib[bcrypt] - pip: + - name: Install passlib[bcrypt] + ansible.builtin.pip: name: passlib[bcrypt] virtualenv: "{{ ansible_playbook_python | dirname | dirname }}" - name: Ensure secrets directory exists - file: + ansible.builtin.file: path: "{{ wazuh_secrets_path | dirname }}" state: directory - name: Template new secrets - no_log: True - template: + no_log: true + ansible.builtin.template: src: wazuh-secrets.yml.j2 dest: "{{ wazuh_secrets_path }}" - name: In-place encrypt wazuh-secrets - copy: + ansible.builtin.copy: content: "{{ lookup('ansible.builtin.file', wazuh_secrets_path) | ansible.builtin.vault(ansible_vault_password) }}" dest: "{{ wazuh_secrets_path }}" decrypt: false From 9f84bc79dc72e03f0b810baf2af48048864ff7e2 Mon Sep 17 00:00:00 2001 From: Alex-Welsh Date: Thu, 7 Nov 2024 15:42:00 +0000 Subject: [PATCH 05/13] Ansible-lint revert line-length limit --- .github/workflows/stackhpc-pull-request.yml | 2 +- etc/kayobe/ansible/build-ofed-rocky.yml | 3 +-- .../ansible/deploy-os-capacity-exporter.yml | 18 ++++++------------ etc/kayobe/ansible/run-container-hotfix.yml | 3 +-- etc/kayobe/ansible/vault-deploy-overcloud.yml | 3 +-- etc/kayobe/ansible/vault-deploy-seed.yml | 3 +-- etc/kayobe/ansible/vault-unseal-overcloud.yml | 3 +-- 7 files changed, 12 insertions(+), 23 deletions(-) diff --git a/.github/workflows/stackhpc-pull-request.yml b/.github/workflows/stackhpc-pull-request.yml index 0af02d5ca..fb6786d9c 100644 --- a/.github/workflows/stackhpc-pull-request.yml +++ b/.github/workflows/stackhpc-pull-request.yml @@ -96,7 +96,7 @@ jobs: - name: Linting code ๐Ÿงช run: | - ansible-lint -v --force-color -x no-changed-when,risky-file-permissions,run-once,name[template],package-latest,yaml,role-name[path] etc/kayobe/ansible/. + ansible-lint -v --force-color -x no-changed-when,risky-file-permissions,run-once,name[template],package-latest,yaml,role-name[path],yaml[line-length] etc/kayobe/ansible/. # A skipped job is treated as success when used as a required status check. 
# The registered required status checks refer to the name of the job in the diff --git a/etc/kayobe/ansible/build-ofed-rocky.yml b/etc/kayobe/ansible/build-ofed-rocky.yml index 6263d0ba6..d7e925547 100644 --- a/etc/kayobe/ansible/build-ofed-rocky.yml +++ b/etc/kayobe/ansible/build-ofed-rocky.yml @@ -42,8 +42,7 @@ - name: Add DOCA host repository package ansible.builtin.dnf: - name: "https://developer.nvidia.com/downloads/networking/secure/doca-sdk/DOCA_2.8/doca-host-2.8.0-204000_\ - {{ stackhpc_pulp_doca_ofed_version }}_rhel9{{ stackhpc_pulp_repo_rocky_9_minor_version }}.x86_64.rpm" + name: "https://developer.nvidia.com/downloads/networking/secure/doca-sdk/DOCA_2.8/doca-host-2.8.0-204000_{{ stackhpc_pulp_doca_ofed_version }}_rhel9{{ stackhpc_pulp_repo_rocky_9_minor_version }}.x86_64.rpm" disable_gpg_check: true - name: Install DOCA extra packages diff --git a/etc/kayobe/ansible/deploy-os-capacity-exporter.yml b/etc/kayobe/ansible/deploy-os-capacity-exporter.yml index e60b1bba1..b11b5dd1a 100644 --- a/etc/kayobe/ansible/deploy-os-capacity-exporter.yml +++ b/etc/kayobe/ansible/deploy-os-capacity-exporter.yml @@ -31,18 +31,12 @@ - name: Set facts for admin credentials ansible.builtin.set_fact: - stackhpc_os_capacity_auth_url: "{{ credential.stdout_lines | select('match', '.*OS_AUTH_URL*.') \ - | first | split('=') | last | replace(\"'\", '') }}" - stackhpc_os_capacity_project_name: "{{ credential.stdout_lines | select('match', '.*OS_PROJECT_NAME*.') \ - | first | split('=') | last | replace(\"'\", '') }}" - stackhpc_os_capacity_domain_name: "{{ credential.stdout_lines | select('match', '.*OS_PROJECT_DOMAIN_NAME*.') \ - | first | split('=') | last | replace(\"'\", '') }}" - stackhpc_os_capacity_openstack_region_name: "{{ credential.stdout_lines | select('match', '.*OS_REGION_NAME*.') \ - | first | split('=') | last | replace(\"'\", '') }}" - stackhpc_os_capacity_username: "{{ credential.stdout_lines | select('match', '.*OS_USERNAME*.') \ - | first | split('=') | last | replace(\"'\", '') }}" - stackhpc_os_capacity_password: "{{ credential.stdout_lines | select('match', '.*OS_PASSWORD*.') \ - | first | split('=') | last | replace(\"'\", '') }}" + stackhpc_os_capacity_auth_url: "{{ credential.stdout_lines | select('match', '.*OS_AUTH_URL*.') | first | split('=') | last | replace(\"'\", '') }}" + stackhpc_os_capacity_project_name: "{{ credential.stdout_lines | select('match', '.*OS_PROJECT_NAME*.') | first | split('=') | last | replace(\"'\", '') }}" + stackhpc_os_capacity_domain_name: "{{ credential.stdout_lines | select('match', '.*OS_PROJECT_DOMAIN_NAME*.') | first | split('=') | last | replace(\"'\", '') }}" + stackhpc_os_capacity_openstack_region_name: "{{ credential.stdout_lines | select('match', '.*OS_REGION_NAME*.') | first | split('=') | last | replace(\"'\", '') }}" + stackhpc_os_capacity_username: "{{ credential.stdout_lines | select('match', '.*OS_USERNAME*.') | first | split('=') | last | replace(\"'\", '') }}" + stackhpc_os_capacity_password: "{{ credential.stdout_lines | select('match', '.*OS_PASSWORD*.') | first | split('=') | last | replace(\"'\", '') }}" when: stackhpc_enable_os_capacity - name: Template clouds.yml diff --git a/etc/kayobe/ansible/run-container-hotfix.yml b/etc/kayobe/ansible/run-container-hotfix.yml index 8636a85b6..ca175aff8 100644 --- a/etc/kayobe/ansible/run-container-hotfix.yml +++ b/etc/kayobe/ansible/run-container-hotfix.yml @@ -21,6 +21,5 @@ index_var: index - name: Run container_hotfix_command - ansible.builtin.command: "{{ kolla_container_engine | 
default('docker') }} exec \ - {{ '-u 0' if container_hotfix_become else '' }} {{ hotfix_container }} {{ container_hotfix_command }}" + ansible.builtin.command: "{{ kolla_container_engine | default('docker') }} exec {{ '-u 0' if container_hotfix_become else '' }} {{ hotfix_container }} {{ container_hotfix_command }}" when: container_hotfix_command diff --git a/etc/kayobe/ansible/vault-deploy-overcloud.yml b/etc/kayobe/ansible/vault-deploy-overcloud.yml index fc87b9e5b..29c0e39a7 100644 --- a/etc/kayobe/ansible/vault-deploy-overcloud.yml +++ b/etc/kayobe/ansible/vault-deploy-overcloud.yml @@ -89,8 +89,7 @@ vars: vault_api_addr: https://{{ internal_net_name | net_ip }}:8200 vault_unseal_token: "{{ vault_keys.root_token }}" - vault_unseal_ca_cert: "{{ '/etc/pki/tls/certs/ca-bundle.crt' if ansible_facts.os_family == 'RedHat' \ - else '/usr/local/share/ca-certificates/OS-TLS-ROOT.crt' }}" + vault_unseal_ca_cert: "{{ '/etc/pki/tls/certs/ca-bundle.crt' if ansible_facts.os_family == 'RedHat' else '/usr/local/share/ca-certificates/OS-TLS-ROOT.crt' }}" vault_unseal_keys: "{{ vault_keys.keys_base64 }}" environment: https_proxy: "" diff --git a/etc/kayobe/ansible/vault-deploy-seed.yml b/etc/kayobe/ansible/vault-deploy-seed.yml index a74557f63..9617aa905 100644 --- a/etc/kayobe/ansible/vault-deploy-seed.yml +++ b/etc/kayobe/ansible/vault-deploy-seed.yml @@ -77,8 +77,7 @@ - common_name: "{% if kolla_internal_fqdn != kolla_internal_vip_address %}{{ kolla_internal_fqdn }}{% else %}overcloud{% endif %}" role: "{{ seed_vault_pki_role_name }}" extra_params: - ip_sans: "{% for host in groups['controllers'] %}{{ internal_net_name | net_ip(host) }}\ - {% if not loop.last %},{% endif %}{% endfor %},{{ kolla_internal_vip_address }}" + ip_sans: "{% for host in groups['controllers'] %}{{ internal_net_name | net_ip(host) }}{% if not loop.last %},{% endif %}{% endfor %},{{ kolla_internal_vip_address }}" vault_pki_write_certificate_files: true vault_pki_write_pem_bundle: false vault_pki_write_int_ca_to_file: true diff --git a/etc/kayobe/ansible/vault-unseal-overcloud.yml b/etc/kayobe/ansible/vault-unseal-overcloud.yml index 11cc600d9..d5d38678e 100644 --- a/etc/kayobe/ansible/vault-unseal-overcloud.yml +++ b/etc/kayobe/ansible/vault-unseal-overcloud.yml @@ -31,8 +31,7 @@ vars: vault_api_addr: https://{{ internal_net_name | net_ip }}:8200 vault_unseal_token: "{{ vault_keys.root_token }}" - vault_unseal_ca_cert: "{{ '/etc/pki/tls/certs/ca-bundle.crt' if ansible_facts.os_family == 'RedHat' \ - else '/usr/local/share/ca-certificates/OS-TLS-ROOT.crt' }}" + vault_unseal_ca_cert: "{{ '/etc/pki/tls/certs/ca-bundle.crt' if ansible_facts.os_family == 'RedHat' else '/usr/local/share/ca-certificates/OS-TLS-ROOT.crt' }}" vault_unseal_keys: "{{ vault_keys.keys_base64 }}" environment: https_proxy: "" From 321acad9afe3fd716b4f879b964ac46c03b3674b Mon Sep 17 00:00:00 2001 From: Alex-Welsh Date: Fri, 8 Nov 2024 09:14:41 +0000 Subject: [PATCH 06/13] ansible-lint - Apply suggestions from code review --- etc/kayobe/ansible/cephadm.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/etc/kayobe/ansible/cephadm.yml b/etc/kayobe/ansible/cephadm.yml index 8af7ea022..143a40cd3 100644 --- a/etc/kayobe/ansible/cephadm.yml +++ b/etc/kayobe/ansible/cephadm.yml @@ -2,15 +2,21 @@ # Deploy Ceph via Cephadm. Create EC profiles, CRUSH rules, pools and keys. 
- name: Import Cephadm deploy playbook import_playbook: cephadm-deploy.yml + - name: Import Cephadm commands pre playbook import_playbook: cephadm-commands-pre.yml + - name: Import Cephadm ec profiles playbook import_playbook: cephadm-ec-profiles.yml + - name: Import Cephadm crush rules playbook import_playbook: cephadm-crush-rules.yml + - name: Import Cephadm pools playbook import_playbook: cephadm-pools.yml + - name: Import Cephadm keys playbook import_playbook: cephadm-keys.yml + - name: Import Cephadm commands post playbook import_playbook: cephadm-commands-post.yml From c81275f7ecb215bf1ea9479d4e09bd72a64f953e Mon Sep 17 00:00:00 2001 From: Alex-Welsh <112560678+Alex-Welsh@users.noreply.github.com> Date: Mon, 2 Dec 2024 10:45:04 +0000 Subject: [PATCH 07/13] Apply suggestions from code review Co-authored-by: Matt Crees --- .ansible-lint-ignore | 2 +- .github/workflows/stackhpc-pull-request.yml | 2 +- etc/kayobe/ansible/cephadm-commands-post.yml | 2 +- etc/kayobe/ansible/cephadm-commands-pre.yml | 2 +- etc/kayobe/ansible/cephadm.yml | 2 +- etc/kayobe/ansible/fix-houston.yml | 3 ++- etc/kayobe/ansible/pulp-repo-promote-production.yml | 2 +- etc/kayobe/ansible/rabbitmq-reset.yml | 6 +++++- etc/kayobe/ansible/smartmon-tools.yml | 2 +- etc/kayobe/ansible/stackhpc-openstack-tests.yml | 2 +- etc/kayobe/ansible/vault-deploy-overcloud.yml | 4 ++-- etc/kayobe/ansible/wazuh-manager.yml | 3 ++- 12 files changed, 19 insertions(+), 13 deletions(-) diff --git a/.ansible-lint-ignore b/.ansible-lint-ignore index 3891e8f9a..aa444047f 100644 --- a/.ansible-lint-ignore +++ b/.ansible-lint-ignore @@ -1,4 +1,4 @@ -# This file contains ignores rule violations for ansible-lint +# This file contains ignores to rule violations for ansible-lint etc/kayobe/ansible/vault-deploy-barbican.yml fqcn[action-core] etc/kayobe/ansible/vault-generate-backend-tls.yml fqcn[action-core] etc/kayobe/ansible/vault-generate-internal-tls.yml fqcn[action-core] diff --git a/.github/workflows/stackhpc-pull-request.yml b/.github/workflows/stackhpc-pull-request.yml index fb6786d9c..90a282f27 100644 --- a/.github/workflows/stackhpc-pull-request.yml +++ b/.github/workflows/stackhpc-pull-request.yml @@ -96,7 +96,7 @@ jobs: - name: Linting code ๐Ÿงช run: | - ansible-lint -v --force-color -x no-changed-when,risky-file-permissions,run-once,name[template],package-latest,yaml,role-name[path],yaml[line-length] etc/kayobe/ansible/. + ansible-lint -v --force-color etc/kayobe/ansible/. # A skipped job is treated as success when used as a required status check. 
# The registered required status checks refer to the name of the job in the diff --git a/etc/kayobe/ansible/cephadm-commands-post.yml b/etc/kayobe/ansible/cephadm-commands-post.yml index c53c4d0ce..6bd083321 100644 --- a/etc/kayobe/ansible/cephadm-commands-post.yml +++ b/etc/kayobe/ansible/cephadm-commands-post.yml @@ -7,7 +7,7 @@ - cephadm - cephadm-commands tasks: - - name: Apply Cephadm role + - name: Apply Cephadm commands role ansible.builtin.import_role: name: stackhpc.cephadm.commands vars: diff --git a/etc/kayobe/ansible/cephadm-commands-pre.yml b/etc/kayobe/ansible/cephadm-commands-pre.yml index b59cf5162..01969ed73 100644 --- a/etc/kayobe/ansible/cephadm-commands-pre.yml +++ b/etc/kayobe/ansible/cephadm-commands-pre.yml @@ -7,7 +7,7 @@ - cephadm - cephadm-commands tasks: - - name: Apply Cephadm role + - name: Apply Cephadm commands role ansible.builtin.import_role: name: stackhpc.cephadm.commands vars: diff --git a/etc/kayobe/ansible/cephadm.yml b/etc/kayobe/ansible/cephadm.yml index 143a40cd3..b3c02654b 100644 --- a/etc/kayobe/ansible/cephadm.yml +++ b/etc/kayobe/ansible/cephadm.yml @@ -6,7 +6,7 @@ - name: Import Cephadm commands pre playbook import_playbook: cephadm-commands-pre.yml -- name: Import Cephadm ec profiles playbook +- name: Import Cephadm EC profiles playbook import_playbook: cephadm-ec-profiles.yml - name: Import Cephadm crush rules playbook diff --git a/etc/kayobe/ansible/fix-houston.yml b/etc/kayobe/ansible/fix-houston.yml index 8df8abc28..e69929650 100644 --- a/etc/kayobe/ansible/fix-houston.yml +++ b/etc/kayobe/ansible/fix-houston.yml @@ -20,6 +20,7 @@ tasks: - name: Include kolla-ansible host vars ansible.builtin.include_vars: "{{ kolla_config_path }}/inventory/overcloud/host_vars/{{ inventory_hostname }}" + - name: Create systemd service for -ovs network interface ansible.builtin.template: src: fix-houston-interface.service.j2 @@ -31,7 +32,7 @@ notify: reload systemd - name: Enable and start systemd service for -ovs network interface - ansible.builtin.systemd: # noqa command-instead-of-module + ansible.builtin.systemd: name: fix-houston-{{ item }} enabled: true state: started diff --git a/etc/kayobe/ansible/pulp-repo-promote-production.yml b/etc/kayobe/ansible/pulp-repo-promote-production.yml index a3e50c726..653716687 100644 --- a/etc/kayobe/ansible/pulp-repo-promote-production.yml +++ b/etc/kayobe/ansible/pulp-repo-promote-production.yml @@ -3,7 +3,7 @@ hosts: localhost gather_facts: true tasks: - - name: Ensure repository publications exist + - name: Ensure repository distributions exist ansible.builtin.import_role: name: stackhpc.pulp.pulp_distribution vars: diff --git a/etc/kayobe/ansible/rabbitmq-reset.yml b/etc/kayobe/ansible/rabbitmq-reset.yml index 405b14830..47fc1c3ff 100644 --- a/etc/kayobe/ansible/rabbitmq-reset.yml +++ b/etc/kayobe/ansible/rabbitmq-reset.yml @@ -31,7 +31,7 @@ register: inspection - name: Ensure the {{ container_name }} container is running - ansible.builtin.command: systemctl start kolla-{{ container_name }}-container.service + ansible.builtin.command: systemctl start kolla-{{ container_name }}-container.service # noqa command-instead-of-module when: inspection.stdout == 'false' - name: Wait for the {{ container_name }} container to reach state 'Running' @@ -48,12 +48,16 @@ - name: Stop app ansible.builtin.command: docker exec {{ container_name }} /bin/bash -c 'rabbitmqctl stop_app' + - name: Force reset app ansible.builtin.command: docker exec {{ container_name }} /bin/bash -c 'rabbitmqctl force_reset' + - name: Start app 
ansible.builtin.command: docker exec {{ container_name }} /bin/bash -c 'rabbitmqctl start_app' + - name: Wait for all nodes to join the cluster ansible.builtin.command: docker exec {{ container_name }} /bin/bash -c 'rabbitmqctl await_online_nodes {{ groups['controllers'] | length }}' + - name: Restart OpenStack services hosts: controllers:compute become: true diff --git a/etc/kayobe/ansible/smartmon-tools.yml b/etc/kayobe/ansible/smartmon-tools.yml index a8f096439..56888e0c7 100644 --- a/etc/kayobe/ansible/smartmon-tools.yml +++ b/etc/kayobe/ansible/smartmon-tools.yml @@ -1,5 +1,5 @@ --- -- name: Install smartmon-tools +- name: Install and set up smartmon-tools hosts: overcloud tasks: diff --git a/etc/kayobe/ansible/stackhpc-openstack-tests.yml b/etc/kayobe/ansible/stackhpc-openstack-tests.yml index 4e9bf0cff..3094c3d4e 100644 --- a/etc/kayobe/ansible/stackhpc-openstack-tests.yml +++ b/etc/kayobe/ansible/stackhpc-openstack-tests.yml @@ -46,7 +46,7 @@ ansible.builtin.pip: name: - "{{ repo_tmpdir.path }}" - - -r{{ repo_tmpdir.path }}/requirements.txt + - "-r {{ repo_tmpdir.path }}/requirements.txt" - pytest-html - pytest-timeout virtualenv: "{{ sot_venv }}" diff --git a/etc/kayobe/ansible/vault-deploy-overcloud.yml b/etc/kayobe/ansible/vault-deploy-overcloud.yml index 29c0e39a7..31ccf2d2e 100644 --- a/etc/kayobe/ansible/vault-deploy-overcloud.yml +++ b/etc/kayobe/ansible/vault-deploy-overcloud.yml @@ -83,7 +83,7 @@ file: "{{ kayobe_env_config_path }}/vault/overcloud-vault-keys.json" name: vault_keys - - name: Import vault unseal role + - name: Apply vault unseal role ansible.builtin.import_role: name: stackhpc.hashicorp.vault_unseal vars: @@ -99,7 +99,7 @@ gather_facts: true hosts: controllers[0] tasks: - - name: Import vault pki role + - name: Apply vault pki role ansible.builtin.import_role: name: stackhpc.hashicorp.vault_pki vars: diff --git a/etc/kayobe/ansible/wazuh-manager.yml b/etc/kayobe/ansible/wazuh-manager.yml index 404a96970..c9fcdccf6 100644 --- a/etc/kayobe/ansible/wazuh-manager.yml +++ b/etc/kayobe/ansible/wazuh-manager.yml @@ -34,7 +34,8 @@ ansible.builtin.assert: that: local_custom_certs_path is not defined fail_msg: The variable, `local_custom_certs_path`, is no longer used. Please remove this variable. 
-- name: Apply wazuh indexer role + +- name: Apply Wazuh indexer role hosts: localhost roles: - role: wazuh-ansible/wazuh-ansible/roles/wazuh/wazuh-indexer From 6bc5e8b5dd2599553d08f93b059726065ce2e322 Mon Sep 17 00:00:00 2001 From: Alex-Welsh Date: Mon, 2 Dec 2024 11:19:24 +0000 Subject: [PATCH 08/13] Add .ansible-lint config file --- .ansible-lint | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 .ansible-lint diff --git a/.ansible-lint b/.ansible-lint new file mode 100644 index 000000000..a9a2aad61 --- /dev/null +++ b/.ansible-lint @@ -0,0 +1,10 @@ +--- +skip_list: + - no-changed-when + - risky-file-permissions + - run-once + - name[template] + - package-latest + - yaml + - role-name[path] + - yaml[line-length] From 74c5de520c8f9daafec3b1d82e6edafc22aede9c Mon Sep 17 00:00:00 2001 From: Alex-Welsh Date: Mon, 2 Dec 2024 11:28:13 +0000 Subject: [PATCH 09/13] Update ansible-lint after sync --- .../ansible/deploy-os-capacity-exporter.yml | 17 +++++++++-------- etc/kayobe/ansible/fix-grub-rl9.yml | 9 +++++---- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/etc/kayobe/ansible/deploy-os-capacity-exporter.yml b/etc/kayobe/ansible/deploy-os-capacity-exporter.yml index a9cab23dc..8ef4673e1 100644 --- a/etc/kayobe/ansible/deploy-os-capacity-exporter.yml +++ b/etc/kayobe/ansible/deploy-os-capacity-exporter.yml @@ -22,7 +22,9 @@ register: openrc_file_stat run_once: true - - block: + - name: If admin-openrc.sh exists, deploy os-capacity exporter + when: stackhpc_enable_os_capacity and openrc_file_stat.stat.exists + block: - name: Ensure os-capacity directory exists ansible.builtin.file: path: /opt/kayobe/os-capacity/ @@ -37,12 +39,12 @@ - name: Set facts for admin credentials ansible.builtin.set_fact: - stackhpc_os_capacity_auth_url: "{{ credential.stdout_lines | select('match', '.*OS_AUTH_URL*.') | first | split('=') | last | replace(\"'\",'') }}" - stackhpc_os_capacity_project_name: "{{ credential.stdout_lines | select('match', '.*OS_PROJECT_NAME*.') | first | split('=') | last | replace(\"'\",'') }}" - stackhpc_os_capacity_domain_name: "{{ credential.stdout_lines | select('match', '.*OS_PROJECT_DOMAIN_NAME*.') | first | split('=') | last | replace(\"'\",'') }}" - stackhpc_os_capacity_openstack_region_name: "{{ credential.stdout_lines | select('match', '.*OS_REGION_NAME*.') | first | split('=') | last | replace(\"'\",'') }}" - stackhpc_os_capacity_username: "{{ credential.stdout_lines | select('match', '.*OS_USERNAME*.') | first | split('=') | last | replace(\"'\",'') }}" - stackhpc_os_capacity_password: "{{ credential.stdout_lines | select('match', '.*OS_PASSWORD*.') | first | split('=') | last | replace(\"'\",'') }}" + stackhpc_os_capacity_auth_url: "{{ credential.stdout_lines | select('match', '.*OS_AUTH_URL*.') | first | split('=') | last | replace(\"'\", '') }}" + stackhpc_os_capacity_project_name: "{{ credential.stdout_lines | select('match', '.*OS_PROJECT_NAME*.') | first | split('=') | last | replace(\"'\", '') }}" + stackhpc_os_capacity_domain_name: "{{ credential.stdout_lines | select('match', '.*OS_PROJECT_DOMAIN_NAME*.') | first | split('=') | last | replace(\"'\", '') }}" + stackhpc_os_capacity_openstack_region_name: "{{ credential.stdout_lines | select('match', '.*OS_REGION_NAME*.') | first | split('=') | last | replace(\"'\", '') }}" + stackhpc_os_capacity_username: "{{ credential.stdout_lines | select('match', '.*OS_USERNAME*.') | first | split('=') | last | replace(\"'\", '') }}" + stackhpc_os_capacity_password: "{{ credential.stdout_lines | 
select('match', '.*OS_PASSWORD*.') | first | split('=') | last | replace(\"'\", '') }}" - name: Template clouds.yml ansible.builtin.template: @@ -72,4 +74,3 @@ restart: "{{ clouds_yaml_result is changed or cacert_result is changed }}" restart_policy: unless-stopped become: true - when: stackhpc_enable_os_capacity and openrc_file_stat.stat.exists diff --git a/etc/kayobe/ansible/fix-grub-rl9.yml b/etc/kayobe/ansible/fix-grub-rl9.yml index 8192239e9..3a9a7e1d7 100644 --- a/etc/kayobe/ansible/fix-grub-rl9.yml +++ b/etc/kayobe/ansible/fix-grub-rl9.yml @@ -4,7 +4,11 @@ become: true gather_facts: true tasks: - - block: + - name: Remove "--root-dev-only" from grub.cfg if OS is Rocky Linux 9 + when: + - ansible_facts['distribution'] == 'Rocky' + - ansible_facts['distribution_major_version'] == '9' + block: - name: Check that /boot/efi/EFI/rocky/grub.cfg exists ansible.builtin.stat: path: /boot/efi/EFI/rocky/grub.cfg @@ -16,6 +20,3 @@ regexp: --root-dev-only\s? replace: "" when: stat_result.stat.exists - when: - - ansible_facts['distribution'] == 'Rocky' - - ansible_facts['distribution_major_version'] == '9' From 8a21f6369830316d7df8c17dc8694a10976ca752 Mon Sep 17 00:00:00 2001 From: Alex-Welsh Date: Tue, 3 Dec 2024 09:19:29 +0000 Subject: [PATCH 10/13] Move stray comment line --- etc/kayobe/ansible/wazuh-manager.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/etc/kayobe/ansible/wazuh-manager.yml b/etc/kayobe/ansible/wazuh-manager.yml index c9fcdccf6..341853b3f 100644 --- a/etc/kayobe/ansible/wazuh-manager.yml +++ b/etc/kayobe/ansible/wazuh-manager.yml @@ -9,7 +9,6 @@ when: - groups["wazuh-manager"] | length > 0 - # Certificates generation block: - name: Fail if using old path for Wazuh certificates ansible.builtin.fail: @@ -35,6 +34,7 @@ that: local_custom_certs_path is not defined fail_msg: The variable, `local_custom_certs_path`, is no longer used. Please remove this variable. +# Certificates generation - name: Apply Wazuh indexer role hosts: localhost roles: From f45dcd4b65f8b3586d9c60437987d9f086435157 Mon Sep 17 00:00:00 2001 From: Alex-Welsh Date: Mon, 9 Dec 2024 11:28:56 +0000 Subject: [PATCH 11/13] Fix growroot playbook after ansible-lint changes --- etc/kayobe/ansible/growroot.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/etc/kayobe/ansible/growroot.yml b/etc/kayobe/ansible/growroot.yml index f799c1b05..7930567e5 100644 --- a/etc/kayobe/ansible/growroot.yml +++ b/etc/kayobe/ansible/growroot.yml @@ -31,6 +31,7 @@ tasks: - name: Check LVM status ansible.builtin.shell: + executable: "/bin/bash" cmd: set -o pipefail && vgdisplay | grep -q lvm2 changed_when: false failed_when: false From 8839b2f9a6f7bc780c768a8cd48556a21a7f00e1 Mon Sep 17 00:00:00 2001 From: Alex-Welsh Date: Tue, 10 Dec 2024 16:47:44 +0000 Subject: [PATCH 12/13] Add bash executable for cmd shell invocations --- etc/kayobe/ansible/rabbitmq-reset.yml | 12 +++++++----- etc/kayobe/ansible/stop-openstack-services.yml | 8 +++++--- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/etc/kayobe/ansible/rabbitmq-reset.yml b/etc/kayobe/ansible/rabbitmq-reset.yml index 47fc1c3ff..e910d7765 100644 --- a/etc/kayobe/ansible/rabbitmq-reset.yml +++ b/etc/kayobe/ansible/rabbitmq-reset.yml @@ -67,8 +67,10 @@ tasks: # The following services use RabbitMQ. 
- name: Restart OpenStack services - ansible.builtin.shell: >- - set -o pipefail && - systemctl -a | egrep 'kolla-(barbican|blazar|cinder|cloudkitty|designate|heat|ironic|keystone|magnum|manila|neutron|nova|octavia)' | - awk '{ print $1 }' | - xargs systemctl restart + ansible.builtin.shell: + cmd: >- + set -o pipefail && + systemctl -a | egrep 'kolla-(barbican|blazar|cinder|cloudkitty|designate|heat|ironic|keystone|magnum|manila|neutron|nova|octavia)' | + awk '{ print $1 }' | + xargs systemctl restart + executable: "/bin/bash" diff --git a/etc/kayobe/ansible/stop-openstack-services.yml b/etc/kayobe/ansible/stop-openstack-services.yml index f7d438094..5318fcf7c 100644 --- a/etc/kayobe/ansible/stop-openstack-services.yml +++ b/etc/kayobe/ansible/stop-openstack-services.yml @@ -25,6 +25,8 @@ - placement tasks: - name: Stop OpenStack services - ansible.builtin.shell: >- - set -o pipefail && - systemctl -a | egrep '({{ stop_service_list | join('|') }})' | awk '{ print $1 }' | xargs systemctl stop + ansible.builtin.shell: + executable: "/bin/bash" + cmd: >- + set -o pipefail && + systemctl -a | egrep '({{ stop_service_list | join('|') }})' | awk '{ print $1 }' | xargs systemctl stop From dcfbf7fbe5e6c7883b40da2765eb37f7ca9804ac Mon Sep 17 00:00:00 2001 From: Alex-Welsh Date: Fri, 13 Dec 2024 17:01:46 +0000 Subject: [PATCH 13/13] Misc minor fixes --- etc/kayobe/ansible/fix-grub-rl9.yml | 2 +- etc/kayobe/ansible/stackhpc-openstack-tests.yml | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/etc/kayobe/ansible/fix-grub-rl9.yml b/etc/kayobe/ansible/fix-grub-rl9.yml index 3a9a7e1d7..6d81f137d 100644 --- a/etc/kayobe/ansible/fix-grub-rl9.yml +++ b/etc/kayobe/ansible/fix-grub-rl9.yml @@ -19,4 +19,4 @@ path: /boot/efi/EFI/rocky/grub.cfg regexp: --root-dev-only\s? replace: "" - when: stat_result.stat.exists + when: stat_result.stat.exists diff --git a/etc/kayobe/ansible/stackhpc-openstack-tests.yml b/etc/kayobe/ansible/stackhpc-openstack-tests.yml index 3094c3d4e..0af91e79f 100644 --- a/etc/kayobe/ansible/stackhpc-openstack-tests.yml +++ b/etc/kayobe/ansible/stackhpc-openstack-tests.yml @@ -42,15 +42,19 @@ - { name: pip } - { name: setuptools } - - name: Ensure required Python packages are installed + - name: Ensure required individual Python packages are installed ansible.builtin.pip: name: - "{{ repo_tmpdir.path }}" - - "-r {{ repo_tmpdir.path }}/requirements.txt" - pytest-html - pytest-timeout virtualenv: "{{ sot_venv }}" + - name: Ensure Python requirements file packages are installed + ansible.builtin.pip: + requirements: "{{ repo_tmpdir.path }}/requirements.txt" + virtualenv: "{{ sot_venv }}" + - name: Include Kolla Ansible passwords ansible.builtin.include_vars: file: "{{ kayobe_env_config_path }}/kolla/passwords.yml"
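
As a quick local check, the lint invocation used by the updated CI job can be reproduced from the repository root, where ansible-lint automatically picks up the .ansible-lint and .ansible-lint-ignore files added in this series. A minimal sketch, assuming a local virtualenv and an ansible-lint 6.x release (both illustrative rather than prescribed by these patches):

    # Illustrative local setup; versions and paths are assumptions, not part of the patch series.
    python3 -m venv ~/venvs/ansible-lint
    source ~/venvs/ansible-lint/bin/activate
    pip install 'ansible-lint==6.*'   # pulls in a compatible ansible-core as a dependency
    # Same target path as the CI job after the code-review fixups above.
    ansible-lint -v --force-color etc/kayobe/ansible/.

The repository's Ansible Galaxy requirements should be installed beforehand, as the CI job does, or ansible-lint may report module-resolution errors for roles and collections it cannot find.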