Fix ansible-lint E305 (#6459)
Miouge1 authored Jul 28, 2020
1 parent 8bd3b50 commit 214e08f
Showing 22 changed files with 59 additions and 61 deletions.
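
The commit addresses ansible-lint rule 305 (roughly, "use shell only when shell functionality is required") in two ways, sketched below with tasks taken from the diff that follows: tasks that run a plain binary move from the shell module to the command module, while tasks whose templated command genuinely needs a shell (for example a pipe) keep shell and carry a # noqa 305 comment explaining why the rule is skipped.

# Pattern 1: no shell features are needed, so command replaces shell
- name: disable unified_cgroup_hierarchy in Fedora 31+
  command: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"

# Pattern 2: the templated command uses a pipe, so shell stays and the
# noqa comment on the shell line documents why the warning is silenced
- name: download_container | Load image into docker
  shell: "{{ image_load_command }}"  # noqa 305 image_load_command uses pipes, therefore requires shell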
5 changes: 2 additions & 3 deletions roles/container-engine/containerd/tasks/main.yml
@@ -34,9 +34,8 @@
   tags:
     - facts
 
-- name: disable unified_cgroup_hierarchy in Fedora 31+  # noqa 305
-  shell:
-    cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
+- name: disable unified_cgroup_hierarchy in Fedora 31+
+  command: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
   when:
     - ansible_distribution == "Fedora"
     - (ansible_distribution_major_version | int) >= 31
4 changes: 2 additions & 2 deletions roles/container-engine/cri-o/tasks/crictl.yml
@@ -21,8 +21,8 @@
     group: no
   delegate_to: "{{ inventory_hostname }}"
 
-- name: Get crictl completion  # noqa 305
-  shell: "{{ bin_dir }}/crictl completion"
+- name: Get crictl completion
+  command: "{{ bin_dir }}/crictl completion"
   changed_when: False
   register: cri_completion
5 changes: 2 additions & 3 deletions roles/container-engine/docker/tasks/main.yml
@@ -47,9 +47,8 @@
   tags:
     - facts
 
-- name: disable unified_cgroup_hierarchy in Fedora 31+  # noqa 305
-  shell:
-    cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
+- name: disable unified_cgroup_hierarchy in Fedora 31+
+  command: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
   when:
     - ansible_distribution == "Fedora"
     - (ansible_distribution_major_version | int) >= 31
2 changes: 1 addition & 1 deletion roles/download/tasks/check_pull_required.yml
@@ -4,7 +4,7 @@
 # the template, just replace all instances of {{ `{{` }} with {{ and {{ '}}' }} with }}.
 # It will output something like the following:
 # nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc...
-- name: check_pull_required | Generate a list of information about the images on a node  # noqa 305
+- name: check_pull_required | Generate a list of information about the images on a node  # noqa 305 image_info_command contains a pipe, therefore requiring shell
   shell: "{{ image_info_command }}"
   no_log: true
   register: docker_images
8 changes: 4 additions & 4 deletions roles/download/tasks/download_container.yml
@@ -63,8 +63,8 @@
     - pull_required or download_run_once
     - not image_is_cached
 
-- name: download_container | Save and compress image  # noqa 305
-  shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}"
+- name: download_container | Save and compress image
+  shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}"  # noqa 305 image_save_command_on_localhost contains a pipe, therefore requires shell
   delegate_to: "{{ download_delegate }}"
   delegate_facts: no
   register: container_save_status
@@ -103,8 +103,8 @@
     - pull_required
     - download_force_cache
 
-- name: download_container | Load image into docker  # noqa 305
-  shell: "{{ image_load_command }}"
+- name: download_container | Load image into docker
+  shell: "{{ image_load_command }}"  # noqa 305 image_load_command uses pipes, therefore requires shell
   register: container_load_status
   failed_when: container_load_status is failed
   when:
8 changes: 4 additions & 4 deletions roles/download/tasks/prep_download.yml
@@ -32,8 +32,8 @@
     - localhost
     - asserts
 
-- name: prep_download | On localhost, check if user has access to docker without using sudo  # noqa 305
-  shell: "{{ image_info_command_on_localhost }}"
+- name: prep_download | On localhost, check if user has access to docker without using sudo
+  shell: "{{ image_info_command_on_localhost }}"  # noqa 305 image_info_command_on_localhost contains pipe, therefore requires shell
   delegate_to: localhost
   connection: local
   run_once: true
@@ -68,8 +68,8 @@
     - localhost
     - asserts
 
-- name: prep_download | Register docker images info  # noqa 305
-  shell: "{{ image_info_command }}"
+- name: prep_download | Register docker images info
+  shell: "{{ image_info_command }}"  # noqa 305 image_info_command contains pipe therefore requires shell
   no_log: true
   register: docker_images
   failed_when: false
4 changes: 2 additions & 2 deletions roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
-- name: "calico upgrade complete"  # noqa 305
-  shell: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
+- name: "calico upgrade complete"
+  command: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
   when:
     - inventory_hostname == groups['kube-master'][0]
     - calico_upgrade_enabled|default(True)
8 changes: 4 additions & 4 deletions roles/kubernetes/kubeadm/tasks/main.yml
@@ -131,8 +131,8 @@
     group: root
     mode: "0644"
 
-- name: Restart all kube-proxy pods to ensure that they load the new configmap  # noqa 305
-  shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
+- name: Restart all kube-proxy pods to ensure that they load the new configmap
+  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
   run_once: true
   delegate_to: "{{ groups['kube-master']|first }}"
   delegate_facts: false
@@ -157,8 +157,8 @@
 
 # FIXME(jjo): need to post-remove kube-proxy until https://github.com/kubernetes/kubeadm/issues/776
 # is fixed
-- name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services  # noqa 305
-  shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy"
+- name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services
+  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy"
   run_once: true
   delegate_to: "{{ groups['kube-master']|first }}"
   when:
4 changes: 2 additions & 2 deletions roles/kubernetes/node/tasks/main.yml
@@ -45,8 +45,8 @@
   tags:
     - kube-proxy
 
-- name: Verify if br_netfilter module exists  # noqa 305
-  shell: "modinfo br_netfilter"
+- name: Verify if br_netfilter module exists
+  command: "modinfo br_netfilter"
   environment:
     PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH's conservative path management
   register: modinfo_br_netfilter
4 changes: 2 additions & 2 deletions roles/kubernetes/preinstall/tasks/0040-set_facts.yml
@@ -24,8 +24,8 @@
   set_fact:
     is_fedora_coreos: "{{ ostree.stat.exists and os_variant_coreos is not changed }}"
 
-- name: check resolvconf  # noqa 305
-  shell: which resolvconf
+- name: check resolvconf
+  command: which resolvconf
   register: resolvconf
   failed_when: false
   changed_when: false
4 changes: 2 additions & 2 deletions roles/kubernetes/preinstall/tasks/0070-system-packages.yml
@@ -1,6 +1,6 @@
 ---
-- name: Update package management cache (zypper) - SUSE  # noqa 305
-  shell: zypper -n --gpg-auto-import-keys ref
+- name: Update package management cache (zypper) - SUSE
+  command: zypper -n --gpg-auto-import-keys ref
   register: make_cache_output
   until: make_cache_output is succeeded
   retries: 4
4 changes: 2 additions & 2 deletions roles/kubernetes/tokens/tasks/gen_tokens.yml
@@ -34,8 +34,8 @@
   delegate_to: "{{ groups['kube-master'][0] }}"
   when: gen_tokens|default(false)
 
-- name: Gen_tokens | Get list of tokens from first master  # noqa 305
-  shell: "(find {{ kube_token_dir }} -maxdepth 1 -type f)"
+- name: Gen_tokens | Get list of tokens from first master
+  command: "find {{ kube_token_dir }} -maxdepth 1 -type f"
   register: tokens_list
   check_mode: no
   delegate_to: "{{ groups['kube-master'][0] }}"
4 changes: 2 additions & 2 deletions roles/network_plugin/kube-ovn/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
-- name: Kube-OVN | Label ovn-db node  # noqa 305
-  shell: >-
+- name: Kube-OVN | Label ovn-db node
+  command: >-
     {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube-master'] | first }} kube-ovn/role=master
   when:
     - inventory_hostname == groups['kube-master'][0]
12 changes: 6 additions & 6 deletions roles/recover_control_plane/etcd/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
-- name: Get etcd endpoint health  # noqa 305
-  shell: "{{ bin_dir }}/etcdctl endpoint health"
+- name: Get etcd endpoint health
+  command: "{{ bin_dir }}/etcdctl endpoint health"
   register: etcd_endpoint_health
   ignore_errors: true
   changed_when: false
@@ -57,8 +57,8 @@
     - groups['broken_etcd']
    - "item.rc != 0 and not 'No such file or directory' in item.stderr"
 
-- name: Get etcd cluster members  # noqa 305
-  shell: "{{ bin_dir }}/etcdctl member list"
+- name: Get etcd cluster members
+  command: "{{ bin_dir }}/etcdctl member list"
   register: member_list
   changed_when: false
   check_mode: no
@@ -73,8 +73,8 @@
     - not healthy
     - has_quorum
 
-- name: Remove broken cluster members  # noqa 305
-  shell: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}"
+- name: Remove broken cluster members
+  command: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}"
   environment:
     ETCDCTL_API: 3
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
@@ -1,6 +1,6 @@
 ---
-- name: Save etcd snapshot  # noqa 305
-  shell: "{{ bin_dir }}/etcdctl snapshot save /tmp/snapshot.db"
+- name: Save etcd snapshot
+  command: "{{ bin_dir }}/etcdctl snapshot save /tmp/snapshot.db"
   environment:
     - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     - ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
8 changes: 4 additions & 4 deletions roles/recover_control_plane/master/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
-- name: Wait for apiserver  # noqa 305
-  shell: "{{ bin_dir }}/kubectl get nodes"
+- name: Wait for apiserver
+  command: "{{ bin_dir }}/kubectl get nodes"
   environment:
     - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   register: apiserver_is_ready
@@ -10,8 +10,8 @@
   changed_when: false
   when: groups['broken_kube-master']
 
-- name: Delete broken kube-master nodes from cluster  # noqa 305
-  shell: "{{ bin_dir }}/kubectl delete node {{ item }}"
+- name: Delete broken kube-master nodes from cluster
+  command: "{{ bin_dir }}/kubectl delete node {{ item }}"
   environment:
     - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   with_items: "{{ groups['broken_kube-master'] }}"
4 changes: 2 additions & 2 deletions roles/remove-node/remove-etcd-node/tasks/main.yml
@@ -34,8 +34,8 @@
   delegate_to: "{{ groups['etcd']|first }}"
   when: inventory_hostname in groups['etcd']
 
-- name: Remove etcd member from cluster  # noqa 305
-  shell: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}"
+- name: Remove etcd member from cluster
+  command: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}"
   register: etcd_member_in_cluster
   changed_when: false
   check_mode: no
4 changes: 2 additions & 2 deletions roles/reset/tasks/main.yml
@@ -161,8 +161,8 @@
   tags:
     - iptables
 
-- name: Clear IPVS virtual server table  # noqa 305
-  shell: "ipvsadm -C"
+- name: Clear IPVS virtual server table
+  command: "ipvsadm -C"
   when:
     - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s-cluster']
4 changes: 2 additions & 2 deletions tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml
@@ -16,8 +16,8 @@
     state: absent
     name: "{{ test_name }}"
 
-- name: Wait for namespace {{ test_name }} to be fully deleted  # noqa 305
-  shell: kubectl get ns {{ test_name }}
+- name: Wait for namespace {{ test_name }} to be fully deleted
+  command: kubectl get ns {{ test_name }}
   register: delete_namespace
   failed_when:
     - delete_namespace.rc == 0
8 changes: 4 additions & 4 deletions tests/testcases/030_check-network.yml
@@ -89,17 +89,17 @@
           - item in pods_running
       with_items: "{{ pod_ips }}"
 
-    - name: Ping between pods is working  # noqa 305
-      shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
+    - name: Ping between pods is working
+      command: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
       when:
         - not item[0] in pods_hostnet
        - not item[1] in pods_hostnet
       with_nested:
        - "{{ pod_names }}"
        - "{{ pod_ips }}"
 
-    - name: Ping between hostnet pods is working  # noqa 305
-      shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
+    - name: Ping between hostnet pods is working
+      command: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
       when:
        - item[0] in pods_hostnet
        - item[1] in pods_hostnet
8 changes: 4 additions & 4 deletions tests/testcases/040_check-network-adv.yml
@@ -14,8 +14,8 @@
     netchecker_port: 31081
 
   tasks:
-    - name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282)  # noqa 305
-      shell: "ethtool --offload flannel.1 rx off tx off"
+    - name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282)
+      command: "ethtool --offload flannel.1 rx off tx off"
       ignore_errors: true
      when:
        - kube_network_plugin|default('calico') == 'flannel'
@@ -214,8 +214,8 @@
        - inventory_hostname == groups['kube-master'][0]
        - kube_network_plugin_multus|default(false)|bool
 
-    - name: Check secondary macvlan interface  # noqa 305
-      shell: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1"
+    - name: Check secondary macvlan interface
+      command: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1"
      register: output
      until: output.rc == 0
      retries: 90
4 changes: 2 additions & 2 deletions tests/testcases/roles/cluster-dump/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
-- name: Generate dump folder  # noqa 305
-  shell: "{{ bin_dir }}/kubectl cluster-info dump --all-namespaces --output-directory /tmp/cluster-dump"
+- name: Generate dump folder
+  command: "{{ bin_dir }}/kubectl cluster-info dump --all-namespaces --output-directory /tmp/cluster-dump"
   no_log: true
   when: inventory_hostname in groups['kube-master']
