Add noqa and disable .ansible-lint global exclusions #6410

Merged
merged 1 commit on Jul 27, 2020
11 changes: 2 additions & 9 deletions .ansible-lint
@@ -2,15 +2,8 @@
parseable: true
skip_list:
# see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules
# The following rules throw errors.
# These either still need to be corrected in the repository (and the rules re-enabled), or documented as intentionally skipped.
- '301'
- '302'
- '303'
- '305'
- '306'
- '404'
- '503'

# DO NOT add any other rules to this skip_list, instead use local `# noqa` with a comment explaining WHY it is necessary

# These rules are intentionally skipped:
#
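The mechanics, for reviewers unfamiliar with ansible-lint v4: an inline `# noqa <rule-id>` on a task's `name:` line silences that rule for that task only, and the rule numbers used throughout this diff (301, 303, 305, 306, 404, 503) are the default rules from the URL above. A minimal sketch of the intended pattern, with a hypothetical task:

```yaml
# Hypothetical task showing the local-exclusion pattern this PR adopts:
# rule 301 ("Commands should not change things if nothing needs doing")
# is silenced for this one task, with the justification kept inline
# instead of in a repository-wide skip_list entry.
- name: Query cluster nodes # noqa 301 - read-only query, changes nothing
  command: "{{ bin_dir }}/kubectl get nodes -o json"
  register: node_list
```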
2 changes: 1 addition & 1 deletion contrib/azurerm/roles/generate-inventory/tasks/main.yml
@@ -1,6 +1,6 @@
---

- name: Query Azure VMs
- name: Query Azure VMs # noqa 301
command: azure vm list-ip-address --json {{ azure_resource_group }}
register: vm_list_cmd

6 changes: 3 additions & 3 deletions contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
@@ -1,14 +1,14 @@
---

- name: Query Azure VMs IPs
- name: Query Azure VMs IPs # noqa 301
command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
register: vm_ip_list_cmd

- name: Query Azure VMs Roles
- name: Query Azure VMs Roles # noqa 301
command: az vm list -o json --resource-group {{ azure_resource_group }}
register: vm_list_cmd

- name: Query Azure Load Balancer Public IP
- name: Query Azure Load Balancer Public IP # noqa 301
command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
register: lb_pubip_cmd

2 changes: 1 addition & 1 deletion contrib/dind/roles/dind-host/tasks/main.yaml
@@ -69,7 +69,7 @@

# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
# handle manually
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301
raw: |
echo {{ item | hash('sha1') }} > /etc/machine-id.new
mv -b /etc/machine-id.new /etc/machine-id
@@ -7,7 +7,7 @@
register: glusterfs_ppa_added
when: glusterfs_ppa_use

- name: Ensure GlusterFS client will reinstall if the PPA was just added.
- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa 503
apt:
name: "{{ item }}"
state: absent
@@ -7,7 +7,7 @@
register: glusterfs_ppa_added
when: glusterfs_ppa_use

- name: Ensure GlusterFS will reinstall if the PPA was just added.
- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa 503
apt:
name: "{{ item }}"
state: absent
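Rule 503 ("Tasks that run when changed should likely be handlers") fires on tasks gated by another task's registered changed flag. A sketch of the shape being silenced here (module arguments are illustrative); presumably a handler would run too late, since the removal has to happen before the install task later in the same file:

```yaml
# Illustrative shape that trips rule 503: acting on a registered
# result's changed flag instead of notifying a handler. Intentional
# here, because the reinstall must happen mid-play, not at handler time.
- name: Add GlusterFS PPA
  apt_repository:
    repo: "ppa:gluster/glusterfs-7"
  register: glusterfs_ppa_added

- name: Force reinstall when the PPA was just added # noqa 503
  apt:
    name: glusterfs-client
    state: absent
  when: glusterfs_ppa_added.changed
```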
@@ -6,7 +6,7 @@
- name: "Delete bootstrap Heketi."
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
- name: "Ensure there is nothing left over."
- name: "Ensure there is nothing left over." # noqa 301
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
register: "heketi_result"
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
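The task above also shows the polling idiom these heketi roles rely on: a read-only kubectl query repeated until the parsed result is empty. A condensed sketch (selector and retry values are illustrative):

```yaml
# Condensed sketch of the poll-until-empty idiom used above:
# re-run the query every 5 seconds, up to 60 times, until no
# resources match the selector anymore.
- name: Wait for labelled resources to disappear # noqa 301
  command: "{{ bin_dir }}/kubectl get all --selector=deploy-heketi -o json"
  register: leftovers
  until: "leftovers.stdout | from_json | json_query('items[*]') | length == 0"
  retries: 60
  delay: 5
```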
@@ -13,7 +13,7 @@
- name: "Copy topology configuration into container."
changed_when: false
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
- name: "Load heketi topology."
- name: "Load heketi topology." # noqa 503
when: "render.changed"
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
register: "load_heketi"
@@ -18,7 +18,7 @@
- name: "Provision database volume."
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
when: "heketi_database_volume_exists is undefined"
- name: "Copy configuration from pod."
- name: "Copy configuration from pod." # noqa 301
become: true
command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
- name: "Get heketi volume ids."
@@ -10,10 +10,10 @@
template:
src: "topology.json.j2"
dest: "{{ kube_config_dir }}/topology.json"
- name: "Copy topology configuration into container."
- name: "Copy topology configuration into container." # noqa 503
when: "rendering.changed"
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
- name: "Load heketi topology."
- name: "Load heketi topology." # noqa 503
when: "rendering.changed"
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
- name: "Get heketi topology."
@@ -22,15 +22,15 @@
ignore_errors: true
changed_when: false

- name: "Remove volume groups."
- name: "Remove volume groups." # noqa 301
environment:
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
become: true
command: "vgremove {{ volume_group }} --yes"
with_items: "{{ volume_groups.stdout_lines }}"
loop_control: { loop_var: "volume_group" }

- name: "Remove physical volume from cluster disks."
- name: "Remove physical volume from cluster disks." # noqa 301
environment:
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
become: true
22 changes: 11 additions & 11 deletions contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
@@ -1,43 +1,43 @@
---
- name: "Remove storage class."
- name: "Remove storage class." # noqa 301
command: "{{ bin_dir }}/kubectl delete storageclass gluster"
ignore_errors: true
- name: "Tear down heketi."
- name: "Tear down heketi." # noqa 301
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
ignore_errors: true
- name: "Tear down heketi."
- name: "Tear down heketi." # noqa 301
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
ignore_errors: true
- name: "Tear down bootstrap."
include_tasks: "../provision/tasks/bootstrap/tear-down.yml"
- name: "Ensure there is nothing left over."
- name: "Ensure there is nothing left over." # noqa 301
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
register: "heketi_result"
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
retries: 60
delay: 5
- name: "Ensure there is nothing left over."
- name: "Ensure there is nothing left over." # noqa 301
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
register: "heketi_result"
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
retries: 60
delay: 5
- name: "Tear down glusterfs."
- name: "Tear down glusterfs." # noqa 301
command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
ignore_errors: true
- name: "Remove heketi storage service."
- name: "Remove heketi storage service." # noqa 301
command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
ignore_errors: true
- name: "Remove heketi gluster role binding"
- name: "Remove heketi gluster role binding" # noqa 301
command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
ignore_errors: true
- name: "Remove heketi config secret"
- name: "Remove heketi config secret" # noqa 301
command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
ignore_errors: true
- name: "Remove heketi db backup"
- name: "Remove heketi db backup" # noqa 301
command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
ignore_errors: true
- name: "Remove heketi service account"
- name: "Remove heketi service account" # noqa 301
command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
ignore_errors: true
- name: "Get secrets"
4 changes: 2 additions & 2 deletions extra_playbooks/migrate_openstack_provider.yml
@@ -16,13 +16,13 @@
src: get_cinder_pvs.sh
dest: /tmp
mode: u+rwx
- name: Get PVs provisioned by in-tree cloud provider
- name: Get PVs provisioned by in-tree cloud provider # noqa 301
command: /tmp/get_cinder_pvs.sh
register: pvs
- name: Remove get_cinder_pvs.sh
file:
path: /tmp/get_cinder_pvs.sh
state: absent
- name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation
- name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation # noqa 301
command: "{{ bin_dir }}/kubectl annotate --overwrite pv {{ item }} pv.kubernetes.io/provisioned-by=cinder.csi.openstack.org"
loop: "{{ pvs.stdout_lines | list }}"
2 changes: 1 addition & 1 deletion roles/container-engine/containerd/tasks/crictl.yml
@@ -4,7 +4,7 @@
vars:
download: "{{ download_defaults | combine(downloads.crictl) }}"

- name: Install crictl config
- name: Install crictl config # noqa 404
template:
src: ../templates/crictl.yaml.j2
dest: /etc/crictl.yaml
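Rule 404 ("Doesn't need a relative path in role") objects to the `../templates/` prefix, since a bare filename would be resolved through the role's own templates/ directory. A sketch of the two spellings; that the explicit relative path is kept so this task file resolves the same wherever it is included from is an assumption:

```yaml
# What rule 404 prefers: let Ansible resolve the template through the
# role's templates/ search path.
- name: Install crictl config
  template:
    src: crictl.yaml.j2
    dest: /etc/crictl.yaml

# What the diff keeps (with noqa): an explicit relative path, assumed
# intentional so the include resolves consistently.
- name: Install crictl config # noqa 404
  template:
    src: ../templates/crictl.yaml.j2
    dest: /etc/crictl.yaml
```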
2 changes: 1 addition & 1 deletion roles/container-engine/containerd/tasks/main.yml
@@ -34,7 +34,7 @@
tags:
- facts

- name: disable unified_cgroup_hierarchy in Fedora 31+
- name: disable unified_cgroup_hierarchy in Fedora 31+ # noqa 305
shell:
cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
when:
4 changes: 2 additions & 2 deletions roles/container-engine/cri-o/tasks/crictl.yml
@@ -4,7 +4,7 @@
vars:
download: "{{ download_defaults | combine(downloads.crictl) }}"

- name: Install crictl config
- name: Install crictl config # noqa 404
template:
src: ../templates/crictl.yaml.j2
dest: /etc/crictl.yaml
@@ -21,7 +21,7 @@
group: no
delegate_to: "{{ inventory_hostname }}"

- name: Get crictl completion
- name: Get crictl completion # noqa 305
shell: "{{ bin_dir }}/crictl completion"
changed_when: False
register: cri_completion
2 changes: 1 addition & 1 deletion roles/container-engine/cri-o/tasks/main.yaml
@@ -59,7 +59,7 @@
- ansible_distribution == "CentOS"
- ansible_distribution_major_version == "8"

- name: Ensure latest version of libseccom installed
- name: Ensure latest version of libseccom installed # noqa 303
command: "yum update -y libseccomp"
when:
- ansible_distribution == "CentOS"
2 changes: 1 addition & 1 deletion roles/container-engine/docker/tasks/main.yml
@@ -47,7 +47,7 @@
tags:
- facts

- name: disable unified_cgroup_hierarchy in Fedora 31+
- name: disable unified_cgroup_hierarchy in Fedora 31+ # noqa 305
shell:
cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
when:
4 changes: 2 additions & 2 deletions roles/container-engine/docker/tasks/set_facts_dns.yml
@@ -28,13 +28,13 @@
set_fact:
docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains|default([]) }}"

- name: check system nameservers
- name: check system nameservers # noqa 306
shell: grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/'
changed_when: False
register: system_nameservers
check_mode: no

- name: check system search domains
- name: check system search domains # noqa 306
shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/'
changed_when: False
register: system_search_domains
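Rule 306 ("Shells that use pipes should set the pipefail option") is raised because a failing `grep` is masked by `sed`'s exit status. The lint-clean variant would look like the sketch below; the noqa route was presumably chosen because a resolv.conf with no matching lines must not fail the task:

```yaml
# Lint-clean variant of a piped shell task: with pipefail, the pipeline
# fails if any stage fails - which here would also fail on an empty
# resolv.conf, hence the noqa in the actual diff.
- name: check system nameservers
  shell: set -o pipefail && grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/'
  args:
    executable: /bin/bash
  changed_when: false
  register: system_nameservers
  check_mode: no
```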
2 changes: 1 addition & 1 deletion roles/container-engine/docker/tasks/systemd.yml
@@ -11,7 +11,7 @@
notify: restart docker
when: http_proxy is defined or https_proxy is defined

- name: get systemd version
- name: get systemd version # noqa 306
# noqa 303 - systemctl is called intentionally here
shell: systemctl --version | head -n 1 | cut -d " " -f 2
register: systemd_version
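Rule 303 ("Using command rather than module") covers binaries like systemctl, yum, or service that have dedicated modules. The inline comment in this hunk already explains the exception; for contrast, a sketch of what the rule normally expects next to what is kept here:

```yaml
# What rule 303 normally wants - the dedicated module:
- name: Restart docker
  systemd:
    name: docker
    state: restarted

# What this file keeps on purpose: a raw systemctl call, because no
# module simply reports the systemd version (noqa 306 for the pipes,
# noqa 303 for calling systemctl directly).
- name: get systemd version # noqa 303 306
  shell: systemctl --version | head -n 1 | cut -d " " -f 2
  register: systemd_version
  changed_when: false
```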
2 changes: 1 addition & 1 deletion roles/download/tasks/check_pull_required.yml
@@ -4,7 +4,7 @@
# the template, just replace all instances of {{ `{{` }} with {{ and {{ '}}' }} with }}.
# It will output something like the following:
# nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc...
- name: check_pull_required | Generate a list of information about the images on a node
- name: check_pull_required | Generate a list of information about the images on a node # noqa 305
shell: "{{ image_info_command }}"
no_log: true
register: docker_images
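Rule 305 ("Use shell only when shell functionality is required") fires whenever `shell:` could be `command:`. It cannot see that `{{ image_info_command }}` renders to a pipeline, so the noqa records that shell is genuinely needed. An illustrative contrast (the rendered pipeline shown in the comment is an assumption, not taken from the repo):

```yaml
# Fine as command: - no pipes, redirects, or shell expansion involved.
- name: List images with crictl
  command: "{{ bin_dir }}/crictl images"

# Needs shell: - the templated variable expands to a pipeline, e.g.
# something shaped like "docker images -q | sort -u" (illustrative).
- name: Generate a list of information about the images on a node # noqa 305
  shell: "{{ image_info_command }}"
  no_log: true
  register: docker_images
```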
4 changes: 2 additions & 2 deletions roles/download/tasks/download_container.yml
@@ -63,7 +63,7 @@
- pull_required or download_run_once
- not image_is_cached

- name: download_container | Save and compress image
- name: download_container | Save and compress image # noqa 305
shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}"
delegate_to: "{{ download_delegate }}"
delegate_facts: no
@@ -103,7 +103,7 @@
- pull_required
- download_force_cache

- name: download_container | Load image into docker
- name: download_container | Load image into docker # noqa 305
shell: "{{ image_load_command }}"
register: container_load_status
failed_when: container_load_status is failed
4 changes: 2 additions & 2 deletions roles/download/tasks/prep_download.yml
@@ -32,7 +32,7 @@
- localhost
- asserts

- name: prep_download | On localhost, check if user has access to docker without using sudo
- name: prep_download | On localhost, check if user has access to docker without using sudo # noqa 305
shell: "{{ image_info_command_on_localhost }}"
delegate_to: localhost
connection: local
@@ -68,7 +68,7 @@
- localhost
- asserts

- name: prep_download | Register docker images info
- name: prep_download | Register docker images info # noqa 305
shell: "{{ image_info_command }}"
no_log: true
register: docker_images
2 changes: 1 addition & 1 deletion roles/download/tasks/prep_kubeadm_images.yml
@@ -30,7 +30,7 @@
mode: "0755"
state: file

- name: prep_kubeadm_images | Generate list of required images
- name: prep_kubeadm_images | Generate list of required images # noqa 306
shell: "{{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -v coredns"
register: kubeadm_images_raw
run_once: true
8 changes: 4 additions & 4 deletions roles/etcd/tasks/configure.yml
@@ -1,5 +1,5 @@
---
- name: Configure | Check if etcd cluster is healthy
- name: Configure | Check if etcd cluster is healthy # noqa 306
shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'"
register: etcd_cluster_is_healthy
failed_when: false
@@ -16,7 +16,7 @@
ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"

- name: Configure | Check if etcd-events cluster is healthy
- name: Configure | Check if etcd-events cluster is healthy # noqa 306
shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'"
register: etcd_events_cluster_is_healthy
failed_when: false
@@ -73,7 +73,7 @@
ignore_errors: "{{ etcd_events_cluster_is_healthy.rc == 0 }}"
when: is_etcd_master and etcd_events_cluster_setup

- name: Configure | Wait for etcd cluster to be healthy
- name: Configure | Wait for etcd cluster to be healthy # noqa 306
shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'"
register: etcd_cluster_is_healthy
until: etcd_cluster_is_healthy.rc == 0
@@ -94,7 +94,7 @@
ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"

- name: Configure | Wait for etcd-events cluster to be healthy
- name: Configure | Wait for etcd-events cluster to be healthy # noqa 306
shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'"
register: etcd_events_cluster_is_healthy
until: etcd_events_cluster_is_healthy.rc == 0
2 changes: 1 addition & 1 deletion roles/etcd/tasks/gen_certs_script.yml
@@ -139,7 +139,7 @@
inventory_hostname in groups['k8s-cluster']) and
sync_certs|default(false) and inventory_hostname not in groups['etcd']

- name: Gen_certs | Copy certs on nodes
- name: Gen_certs | Copy certs on nodes # noqa 306
shell: "base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}"
args:
executable: /bin/bash
4 changes: 2 additions & 2 deletions roles/etcd/tasks/join_etcd-events_member.yml
@@ -1,5 +1,5 @@
---
- name: Join Member | Add member to etcd-events cluster
- name: Join Member | Add member to etcd-events cluster # noqa 301 305
shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}"
register: member_add_result
until: member_add_result.rc == 0
@@ -24,7 +24,7 @@
{%- endif -%}
{%- endfor -%}

- name: Join Member | Ensure member is in etcd-events cluster
- name: Join Member | Ensure member is in etcd-events cluster # noqa 306
shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_events_access_address }}"
register: etcd_events_member_in_cluster
changed_when: false