diff --git a/.ansible-lint b/.ansible-lint index ececfc57359..e1909e9666e 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -2,15 +2,8 @@ parseable: true skip_list: # see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules - # The following rules throw errors. - # These either still need to be corrected in the repository and the rules re-enabled or documented why they are skipped on purpose. - - '301' - - '302' - - '303' - - '305' - - '306' - - '404' - - '503' + + # DO NOT add any other rules to this skip_list, instead use local `# noqa` with a comment explaining WHY it is necessary # These rules are intentionally skipped: # diff --git a/.gitlab-ci/shellcheck.yml b/.gitlab-ci/shellcheck.yml index 78e32c94316..96c576c342c 100644 --- a/.gitlab-ci/shellcheck.yml +++ b/.gitlab-ci/shellcheck.yml @@ -7,7 +7,7 @@ shellcheck: SHELLCHECK_VERSION: v0.6.0 before_script: - ./tests/scripts/rebase.sh - - curl --silent "https://storage.googleapis.com/shellcheck/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv + - curl --silent --location "https://github.com/koalaman/shellcheck/releases/download/"${SHELLCHECK_VERSION}"/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv - cp shellcheck-"${SHELLCHECK_VERSION}"/shellcheck /usr/bin/ - shellcheck --version script: diff --git a/README.md b/README.md index 31bd0b48d6a..8656bede1ba 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**. You can get your invite [here](http://slack.k8s.io/) -- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere, Packet (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal** +- Can be deployed on **[AWS](docs/aws.md), GCE, [Azure](docs/azure.md), [OpenStack](docs/openstack.md), [vSphere](docs/vsphere.md), [Packet](docs/packet.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal** - **Highly available** cluster - **Composable** (Choice of the network plugin for instance) - Supports most popular **Linux distributions** @@ -129,9 +129,10 @@ Note: Upstart/SysV init based OS types are not supported. - [flanneld](https://github.com/coreos/flannel) v0.12.0 - [kube-ovn](https://github.com/alauda/kube-ovn) v1.2.1 - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.0.0 - - [multus](https://github.com/intel/multus-cni) v3.4.2 + - [multus](https://github.com/intel/multus-cni) v3.6.0 - [weave](https://github.com/weaveworks/weave) v2.6.5 - Application + - [ambassador](https://github.com/datawire/ambassador): v1.5 - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11 - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11 - [cert-manager](https://github.com/jetstack/cert-manager) v0.11.1 @@ -197,6 +198,12 @@ The choice is defined with the variable `kube_network_plugin`. There is also an option to leverage built-in cloud provider networking instead. See also [Network checker](docs/netcheck.md). +## Ingress Plugins + +- [ambassador](docs/ambassador.md): the Ambassador Ingress Controller and API gateway. + +- [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller. 
+ ## Community docs and resources - [kubernetes.io/docs/setup/production-environment/tools/kubespray/](https://kubernetes.io/docs/setup/production-environment/tools/kubespray/) @@ -211,7 +218,8 @@ See also [Network checker](docs/netcheck.md). ## CI Tests -[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/badges/master/build.svg)](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines) +[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/badges/master/pipeline.svg)](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines) + +CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Packet](https://www.packet.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/). -CI/end-to-end tests sponsored by Google (GCE) See the [test matrix](docs/test_cases.md) for details. diff --git a/contrib/azurerm/roles/generate-inventory/tasks/main.yml b/contrib/azurerm/roles/generate-inventory/tasks/main.yml index 20a06e10c40..ccc5e219a7a 100644 --- a/contrib/azurerm/roles/generate-inventory/tasks/main.yml +++ b/contrib/azurerm/roles/generate-inventory/tasks/main.yml @@ -1,6 +1,6 @@ --- -- name: Query Azure VMs +- name: Query Azure VMs # noqa 301 command: azure vm list-ip-address --json {{ azure_resource_group }} register: vm_list_cmd diff --git a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml index e53912cfcd0..6ba7d5a8733 100644 --- a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml +++ b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml @@ -1,14 +1,14 @@ --- -- name: Query Azure VMs IPs +- name: Query Azure VMs IPs # noqa 301 command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }} register: vm_ip_list_cmd -- name: Query Azure VMs Roles +- name: Query Azure VMs Roles # noqa 301 command: az vm list -o json --resource-group {{ azure_resource_group }} register: vm_list_cmd -- name: Query Azure Load Balancer Public IP +- name: Query Azure Load Balancer Public IP # noqa 301 command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip register: lb_pubip_cmd diff --git a/contrib/dind/roles/dind-host/tasks/main.yaml b/contrib/dind/roles/dind-host/tasks/main.yaml index 40ca53cd6d9..5b63a6b37d0 100644 --- a/contrib/dind/roles/dind-host/tasks/main.yaml +++ b/contrib/dind/roles/dind-host/tasks/main.yaml @@ -69,7 +69,7 @@ # Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian, # handle manually -- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) +- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301 raw: | echo {{ item | hash('sha1') }} > /etc/machine-id.new mv -b /etc/machine-id.new /etc/machine-id diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml index 8f80914f872..2865b100410 100644 --- a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml +++ b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml @@ -7,7 +7,7 @@ register: glusterfs_ppa_added when: glusterfs_ppa_use -- name: Ensure GlusterFS client will reinstall if the PPA was just added. 
+- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa 503 apt: name: "{{ item }}" state: absent diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml index 3b586c539a0..855fe36bf5f 100644 --- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml +++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml @@ -7,7 +7,7 @@ register: glusterfs_ppa_added when: glusterfs_ppa_use -- name: Ensure GlusterFS will reinstall if the PPA was just added. +- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa 503 apt: name: "{{ item }}" state: absent diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml index 0ffd6f469f2..e6b16e54a10 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml @@ -6,7 +6,7 @@ - name: "Delete bootstrap Heketi." command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\"" when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0" -- name: "Ensure there is nothing left over." +- name: "Ensure there is nothing left over." # noqa 301 command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json" register: "heketi_result" until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0" diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml index 7d2c5981e7e..07e86237cec 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml @@ -13,7 +13,7 @@ - name: "Copy topology configuration into container." changed_when: false command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json" -- name: "Load heketi topology." +- name: "Load heketi topology." # noqa 503 when: "render.changed" command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json" register: "load_heketi" diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml index 14ab9779399..dc93d782877 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml @@ -18,7 +18,7 @@ - name: "Provision database volume." command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage" when: "heketi_database_volume_exists is undefined" -- name: "Copy configuration from pod." +- name: "Copy configuration from pod." # noqa 301 become: true command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json" - name: "Get heketi volume ids." 
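For reference, the task-local style that replaces the removed `.ansible-lint` skip_list entries looks like the sketch below. The task itself is hypothetical; only the `# noqa` placement and the inline justification mirror what this change set standardizes on.

```yaml
# Hypothetical task illustrating the local "# noqa" style used throughout this change:
# rule 301 ("commands should not change things") is suppressed for this task only,
# and the reason is kept next to the task instead of in a global skip_list.
- name: Example | Query heketi-labelled resources  # noqa 301 read-only query, output is only registered
  command: "{{ bin_dir }}/kubectl get all --selector=\"glusterfs=heketi-pod\" -o=json"
  register: example_query
```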
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml index dd1e272beb0..4430a55926a 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/topology.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml @@ -10,10 +10,10 @@ template: src: "topology.json.j2" dest: "{{ kube_config_dir }}/topology.json" -- name: "Copy topology configuration into container." +- name: "Copy topology configuration into container." # noqa 503 when: "rendering.changed" command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json" -- name: "Load heketi topology." +- name: "Load heketi topology." # noqa 503 when: "rendering.changed" command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json" - name: "Get heketi topology." diff --git a/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml index 9ace96e6274..7ddbf65c81f 100644 --- a/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml +++ b/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml @@ -22,7 +22,7 @@ ignore_errors: true changed_when: false -- name: "Remove volume groups." +- name: "Remove volume groups." # noqa 301 environment: PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management become: true @@ -30,7 +30,7 @@ with_items: "{{ volume_groups.stdout_lines }}" loop_control: { loop_var: "volume_group" } -- name: "Remove physical volume from cluster disks." +- name: "Remove physical volume from cluster disks." # noqa 301 environment: PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management become: true diff --git a/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml index ddc56b256ad..18c11a7315e 100644 --- a/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml +++ b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml @@ -1,43 +1,43 @@ --- -- name: "Remove storage class." +- name: "Remove storage class." # noqa 301 command: "{{ bin_dir }}/kubectl delete storageclass gluster" ignore_errors: true -- name: "Tear down heketi." +- name: "Tear down heketi." # noqa 301 command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\"" ignore_errors: true -- name: "Tear down heketi." +- name: "Tear down heketi." # noqa 301 command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\"" ignore_errors: true - name: "Tear down bootstrap." include_tasks: "../provision/tasks/bootstrap/tear-down.yml" -- name: "Ensure there is nothing left over." +- name: "Ensure there is nothing left over." # noqa 301 command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json" register: "heketi_result" until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0" retries: 60 delay: 5 -- name: "Ensure there is nothing left over." +- name: "Ensure there is nothing left over." 
# noqa 301 command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json" register: "heketi_result" until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0" retries: 60 delay: 5 -- name: "Tear down glusterfs." +- name: "Tear down glusterfs." # noqa 301 command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs" ignore_errors: true -- name: "Remove heketi storage service." +- name: "Remove heketi storage service." # noqa 301 command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints" ignore_errors: true -- name: "Remove heketi gluster role binding" +- name: "Remove heketi gluster role binding" # noqa 301 command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin" ignore_errors: true -- name: "Remove heketi config secret" +- name: "Remove heketi config secret" # noqa 301 command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret" ignore_errors: true -- name: "Remove heketi db backup" +- name: "Remove heketi db backup" # noqa 301 command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup" ignore_errors: true -- name: "Remove heketi service account" +- name: "Remove heketi service account" # noqa 301 command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account" ignore_errors: true - name: "Get secrets" diff --git a/docs/_sidebar.md b/docs/_sidebar.md index bcfbd1adb5f..fb5374fdbf7 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -17,6 +17,8 @@ * [Kube Router](docs/kube-router.md) * [Weave](docs/weave.md) * [Multus](docs/multus.md) +* Ingress + * [Ambassador](docs/ambassador.md) * [Cloud providers](docs/cloud.md) * [AWS](docs/aws.md) * [Azure](docs/azure.md) @@ -26,7 +28,7 @@ * Operating Systems * [Debian](docs/debian.md) * [Coreos](docs/coreos.md) - * [Fedora CoreOS](docs/fcos.md) + * [Fedora CoreOS](docs/fcos.md) * [OpenSUSE](docs/opensuse.md) * Advanced * [Proxy](/docs/proxy.md) diff --git a/docs/ambassador.md b/docs/ambassador.md new file mode 100644 index 00000000000..cce6da373cd --- /dev/null +++ b/docs/ambassador.md @@ -0,0 +1,86 @@ + +# Ambassador + +The Ambassador API Gateway provides all the functionality of a traditional ingress controller +(e.g., path-based routing) while exposing many additional capabilities such as authentication, +URL rewriting, CORS, rate limiting, and automatic metrics collection. + +## Installation + +### Configuration + +* `ingress_ambassador_namespace` (default: `ambassador`): namespace for installing Ambassador. +* `ingress_ambassador_update_window` (default: `0 0 * * SUN`): _crontab_-like expression + for specifying when the Operator should try to update the Ambassador API Gateway. +* `ingress_ambassador_version` (default: `*`): SemVer rule for versions allowed for + installation/updates. +* `ingress_ambassador_secure_port` (default: 443): HTTPS port to listen on. +* `ingress_ambassador_insecure_port` (default: 80): HTTP port to listen on. + +### Ambassador Operator + +This Ambassador addon deploys the Ambassador Operator, which in turn will install Ambassador in +a Kubernetes cluster. + +The Ambassador Operator is a Kubernetes Operator that controls Ambassador's complete lifecycle +in your cluster, automating many of the repeatable tasks you would otherwise have to perform +yourself. Once installed, the Operator will complete installations and seamlessly upgrade to new +versions of Ambassador as they become available.
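As a minimal sketch, these variables can be overridden from your inventory group vars; the file path below is illustrative, and the values simply repeat the documented defaults listed above:

```yaml
# Illustrative group_vars override (example path such as group_vars/k8s-cluster/addons.yml);
# the values shown are the documented defaults.
ingress_ambassador_namespace: "ambassador"
ingress_ambassador_update_window: "0 0 * * SUN"
ingress_ambassador_version: "*"
ingress_ambassador_secure_port: 443
ingress_ambassador_insecure_port: 80
```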
+ +## Usage + +The following example creates a simple http-echo service and an `Ingress` object +to route to it. + +Note that the Ambassador API Gateway will automatically load balance `Ingress` resources +that include the annotation `kubernetes.io/ingress.class=ambassador`. All other +resources are simply ignored. + +```yaml +kind: Pod +apiVersion: v1 +metadata: + name: foo-app + labels: + app: foo +spec: + containers: + - name: foo-app + image: hashicorp/http-echo + args: + - "-text=foo" +--- +kind: Service +apiVersion: v1 +metadata: + name: foo-service +spec: + selector: + app: foo + ports: + # Default port used by the image + - port: 5678 +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: example-ingress + annotations: + kubernetes.io/ingress.class: ambassador +spec: + rules: + - http: + paths: + - path: /foo + backend: + serviceName: foo-service + servicePort: 5678 +``` + +Now you can test that the ingress is working with curl: + +```console +$ export AMB_IP=$(kubectl get service ambassador -n ambassador -o 'go-template={{range .status.loadBalancer.ingress}}{{print .ip "\n"}}{{end}}') +$ curl $AMB_IP/foo +foo +``` diff --git a/docs/openstack.md b/docs/openstack.md index c74ecbec47c..fa7e0d06743 100644 --- a/docs/openstack.md +++ b/docs/openstack.md @@ -1,8 +1,25 @@ -OpenStack -========= -The in-tree cloud provider --------------------------- +# OpenStack + +## Known compatible public clouds + +Kubespray has been tested on a number of OpenStack Public Clouds including (in alphabetical order): + +- [Auro](https://auro.io/) +- [Betacloud](https://www.betacloud.io/) +- [CityCloud](https://www.citycloud.com/) +- [DreamHost](https://www.dreamhost.com/cloud/computing/) +- [ELASTX](https://elastx.se/) +- [EnterCloudSuite](https://www.entercloudsuite.com/) +- [FugaCloud](https://fuga.cloud/) +- [Open Telekom Cloud](https://cloud.telekom.de/): requires setting the variable `wait_for_floatingip = "true"` in your cluster.tfvars +- [OVHcloud](https://www.ovhcloud.com/) +- [Rackspace](https://www.rackspace.com/) +- [Ultimum](https://ultimum.io/) +- [VexxHost](https://vexxhost.com/) +- [Zetta](https://www.zetta.io/) + +## The in-tree cloud provider To deploy Kubespray on [OpenStack](https://www.openstack.org/) uncomment the `cloud_provider` option in `group_vars/all/all.yml` and set it to `openstack`. @@ -62,8 +79,7 @@ If all the VMs in the tenant correspond to Kubespray deployment, you can "sweep Now you can finally run the playbook. -The external cloud provider ----------------------------- +## The external cloud provider The in-tree cloud provider is deprecated and will be removed in a future version of Kubernetes. The target release for removing all remaining in-tree cloud providers is set to 1.21.
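The in-tree provider setup described in `docs/openstack.md` above comes down to a single setting; a minimal sketch of the `group_vars/all/all.yml` entry once it is uncommented and set as the document instructs:

```yaml
# group_vars/all/all.yml -- sketch of the in-tree OpenStack provider setting described above
cloud_provider: openstack
```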
diff --git a/extra_playbooks/migrate_openstack_provider.yml b/extra_playbooks/migrate_openstack_provider.yml index 114e4cf0c7d..0e1584470d7 100644 --- a/extra_playbooks/migrate_openstack_provider.yml +++ b/extra_playbooks/migrate_openstack_provider.yml @@ -16,13 +16,13 @@ src: get_cinder_pvs.sh dest: /tmp mode: u+rwx - - name: Get PVs provisioned by in-tree cloud provider + - name: Get PVs provisioned by in-tree cloud provider # noqa 301 command: /tmp/get_cinder_pvs.sh register: pvs - name: Remove get_cinder_pvs.sh file: path: /tmp/get_cinder_pvs.sh state: absent - - name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation + - name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation # noqa 301 command: "{{ bin_dir }}/kubectl annotate --overwrite pv {{ item }} pv.kubernetes.io/provisioned-by=cinder.csi.openstack.org" loop: "{{ pvs.stdout_lines | list }}" diff --git a/remove-node.yml b/remove-node.yml index c1c2cddce04..7cb25ca51d4 100644 --- a/remove-node.yml +++ b/remove-node.yml @@ -2,6 +2,21 @@ - name: Check ansible version import_playbook: ansible_version.yml +- hosts: all + gather_facts: false + tags: always + tasks: + - name: "Set up proxy environment" + set_fact: + proxy_env: + http_proxy: "{{ http_proxy | default ('') }}" + HTTP_PROXY: "{{ http_proxy | default ('') }}" + https_proxy: "{{ https_proxy | default ('') }}" + HTTPS_PROXY: "{{ https_proxy | default ('') }}" + no_proxy: "{{ no_proxy | default ('') }}" + NO_PROXY: "{{ no_proxy | default ('') }}" + no_log: true + - hosts: "{{ node | default('etcd:k8s-cluster:calico-rr') }}" gather_facts: no vars_prompt: diff --git a/roles/container-engine/containerd/tasks/crictl.yml b/roles/container-engine/containerd/tasks/crictl.yml index eaa94efa3f7..8480951665a 100644 --- a/roles/container-engine/containerd/tasks/crictl.yml +++ b/roles/container-engine/containerd/tasks/crictl.yml @@ -6,7 +6,7 @@ - name: Install crictl config template: - src: ../templates/crictl.yaml.j2 + src: ../templates/crictl.yaml.j2 # noqa 404 not in role_path when task is included from download role dest: /etc/crictl.yaml owner: bin mode: 0644 diff --git a/roles/container-engine/containerd/tasks/main.yml b/roles/container-engine/containerd/tasks/main.yml index 7a80b85e159..74e871fdd7a 100644 --- a/roles/container-engine/containerd/tasks/main.yml +++ b/roles/container-engine/containerd/tasks/main.yml @@ -35,8 +35,7 @@ - facts - name: disable unified_cgroup_hierarchy in Fedora 31+ - shell: - cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0" + command: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0" when: - ansible_distribution == "Fedora" - (ansible_distribution_major_version | int) >= 31 diff --git a/roles/container-engine/cri-o/tasks/crictl.yml b/roles/container-engine/cri-o/tasks/crictl.yml index e9698053349..5749574577c 100644 --- a/roles/container-engine/cri-o/tasks/crictl.yml +++ b/roles/container-engine/cri-o/tasks/crictl.yml @@ -6,7 +6,7 @@ - name: Install crictl config template: - src: ../templates/crictl.yaml.j2 + src: ../templates/crictl.yaml.j2 # noqa 404 not in role_path when task is included from download role dest: /etc/crictl.yaml owner: bin mode: 0644 @@ -22,7 +22,7 @@ delegate_to: "{{ inventory_hostname }}" - name: Get crictl completion - shell: "{{ bin_dir }}/crictl completion" + command: "{{ bin_dir }}/crictl completion" changed_when: False register: cri_completion diff --git a/roles/container-engine/cri-o/tasks/main.yaml b/roles/container-engine/cri-o/tasks/main.yaml index 
af0ecb92eec..6b6f114d251 100644 --- a/roles/container-engine/cri-o/tasks/main.yaml +++ b/roles/container-engine/cri-o/tasks/main.yaml @@ -59,7 +59,7 @@ - ansible_distribution == "CentOS" - ansible_distribution_major_version == "8" -- name: Ensure latest version of libseccom installed +- name: Ensure latest version of libseccom installed # noqa 303 command: "yum update -y libseccomp" when: - ansible_distribution == "CentOS" diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml index 9369186bb7a..b0d5adbf6cb 100644 --- a/roles/container-engine/docker/tasks/main.yml +++ b/roles/container-engine/docker/tasks/main.yml @@ -48,8 +48,7 @@ - facts - name: disable unified_cgroup_hierarchy in Fedora 31+ - shell: - cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0" + command: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0" when: - ansible_distribution == "Fedora" - (ansible_distribution_major_version | int) >= 31 diff --git a/roles/container-engine/docker/tasks/set_facts_dns.yml b/roles/container-engine/docker/tasks/set_facts_dns.yml index 23464dabb04..b884c7cf060 100644 --- a/roles/container-engine/docker/tasks/set_facts_dns.yml +++ b/roles/container-engine/docker/tasks/set_facts_dns.yml @@ -28,13 +28,13 @@ set_fact: docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains|default([]) }}" -- name: check system nameservers +- name: check system nameservers # noqa 306 shell: grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/' changed_when: False register: system_nameservers check_mode: no -- name: check system search domains +- name: check system search domains # noqa 306 shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/' changed_when: False register: system_search_domains diff --git a/roles/container-engine/docker/tasks/systemd.yml b/roles/container-engine/docker/tasks/systemd.yml index 0a232ea9eed..108eea18854 100644 --- a/roles/container-engine/docker/tasks/systemd.yml +++ b/roles/container-engine/docker/tasks/systemd.yml @@ -11,7 +11,7 @@ notify: restart docker when: http_proxy is defined or https_proxy is defined -- name: get systemd version +- name: get systemd version # noqa 306 # noqa 303 - systemctl is called intentionally here shell: systemctl --version | head -n 1 | cut -d " " -f 2 register: systemd_version diff --git a/roles/container-engine/docker/vars/debian.yml b/roles/container-engine/docker/vars/debian.yml index c266302eb0b..dd39f8788e4 100644 --- a/roles/container-engine/docker/vars/debian.yml +++ b/roles/container-engine/docker/vars/debian.yml @@ -12,14 +12,14 @@ docker_versioned_pkg: '18.03': docker-ce=18.03.1~ce-0~debian '18.06': docker-ce=18.06.2~ce~3-0~debian '18.09': docker-ce=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }} - '19.03': docker-ce=5:19.03.11~3-0~debian-{{ ansible_distribution_release|lower }} - 'stable': docker-ce=5:19.03.11~3-0~debian-{{ ansible_distribution_release|lower }} - 'edge': docker-ce=5:19.03.11~3-0~debian-{{ ansible_distribution_release|lower }} + '19.03': docker-ce=5:19.03.12~3-0~debian-{{ ansible_distribution_release|lower }} + 'stable': docker-ce=5:19.03.12~3-0~debian-{{ ansible_distribution_release|lower }} + 'edge': docker-ce=5:19.03.12~3-0~debian-{{ ansible_distribution_release|lower }} docker_cli_versioned_pkg: 'latest': docker-ce-cli '18.09': docker-ce-cli=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }} - '19.03': 
docker-ce-cli=5:19.03.11~3-0~debian-{{ ansible_distribution_release|lower }} + '19.03': docker-ce-cli=5:19.03.12~3-0~debian-{{ ansible_distribution_release|lower }} docker_package_info: pkg_mgr: apt diff --git a/roles/container-engine/docker/vars/fedora.yml b/roles/container-engine/docker/vars/fedora.yml index 2f9bb4ce2ef..493ef764cd3 100644 --- a/roles/container-engine/docker/vars/fedora.yml +++ b/roles/container-engine/docker/vars/fedora.yml @@ -9,14 +9,14 @@ docker_versioned_pkg: '18.03': docker-ce-18.03.1.ce-3.fc{{ ansible_distribution_major_version }} '18.06': docker-ce-18.06.2.ce-3.fc{{ ansible_distribution_major_version }} '18.09': docker-ce-18.09.7-3.fc{{ ansible_distribution_major_version }} - '19.03': docker-ce-19.03.11-3.fc{{ ansible_distribution_major_version }} - 'stable': docker-ce-19.03.11-3.fc{{ ansible_distribution_major_version }} - 'edge': docker-ce-19.03.11-3.fc{{ ansible_distribution_major_version }} + '19.03': docker-ce-19.03.12-3.fc{{ ansible_distribution_major_version }} + 'stable': docker-ce-19.03.12-3.fc{{ ansible_distribution_major_version }} + 'edge': docker-ce-19.03.12-3.fc{{ ansible_distribution_major_version }} docker_cli_versioned_pkg: 'latest': docker-ce-cli - '18.09': docker-ce-cli-19.03.11-3.fc{{ ansible_distribution_major_version }} - '19.03': docker-ce-cli-19.03.11-3.fc{{ ansible_distribution_major_version }} + '18.09': docker-ce-cli-19.03.12-3.fc{{ ansible_distribution_major_version }} + '19.03': docker-ce-cli-19.03.12-3.fc{{ ansible_distribution_major_version }} docker_package_info: pkg_mgr: dnf diff --git a/roles/container-engine/docker/vars/redhat.yml b/roles/container-engine/docker/vars/redhat.yml index 3c543a9d16c..7e24349b44c 100644 --- a/roles/container-engine/docker/vars/redhat.yml +++ b/roles/container-engine/docker/vars/redhat.yml @@ -12,14 +12,14 @@ docker_versioned_pkg: '18.03': docker-ce-18.03.1.ce-1.el7.centos '18.06': docker-ce-18.06.3.ce-3.el7 '18.09': docker-ce-18.09.9-3.el7 - '19.03': docker-ce-19.03.11-3.el7 - 'stable': docker-ce-19.03.11-3.el7 - 'edge': docker-ce-19.03.11-3.el7 + '19.03': docker-ce-19.03.12-3.el7 + 'stable': docker-ce-19.03.12-3.el7 + 'edge': docker-ce-19.03.12-3.el7 docker_cli_versioned_pkg: 'latest': docker-ce-cli '18.09': docker-ce-cli-18.09.9-3.el7 - '19.03': docker-ce-cli-19.03.11-3.el7 + '19.03': docker-ce-cli-19.03.12-3.el7 docker_selinux_versioned_pkg: 'latest': docker-ce-selinux-17.03.3.ce-1.el7 diff --git a/roles/container-engine/docker/vars/ubuntu-amd64.yml b/roles/container-engine/docker/vars/ubuntu-amd64.yml index 0f264e5e3c3..fcc6d140fc9 100644 --- a/roles/container-engine/docker/vars/ubuntu-amd64.yml +++ b/roles/container-engine/docker/vars/ubuntu-amd64.yml @@ -12,14 +12,14 @@ docker_versioned_pkg: '17.12': docker-ce=17.12.1~ce-0~ubuntu-{{ ansible_distribution_release|lower }} '18.06': docker-ce=18.06.2~ce~3-0~ubuntu '18.09': docker-ce=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }} - '19.03': docker-ce=5:19.03.11~3-0~ubuntu-{{ ansible_distribution_release|lower }} - 'stable': docker-ce=5:19.03.11~3-0~ubuntu-{{ ansible_distribution_release|lower }} - 'edge': docker-ce=5:19.03.11~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '19.03': docker-ce=5:19.03.12~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'stable': docker-ce=5:19.03.12~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'edge': docker-ce=5:19.03.12~3-0~ubuntu-{{ ansible_distribution_release|lower }} docker_cli_versioned_pkg: 'latest': docker-ce-cli '18.09': docker-ce-cli=5:18.09.9~3-0~ubuntu-{{ 
ansible_distribution_release|lower }} - '19.03': docker-ce-cli=5:19.03.11~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '19.03': docker-ce-cli=5:19.03.12~3-0~ubuntu-{{ ansible_distribution_release|lower }} docker_package_info: pkg_mgr: apt diff --git a/roles/container-engine/docker/vars/ubuntu-arm64.yml b/roles/container-engine/docker/vars/ubuntu-arm64.yml index bd35458e52c..b65a5b4c241 100644 --- a/roles/container-engine/docker/vars/ubuntu-arm64.yml +++ b/roles/container-engine/docker/vars/ubuntu-arm64.yml @@ -8,14 +8,14 @@ docker_versioned_pkg: '17.12': docker-ce=17.12.1~ce-0~ubuntu-{{ ansible_distribution_release|lower }} '18.06': docker-ce=18.06.2~ce~3-0~ubuntu '18.09': docker-ce=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }} - '19.03': docker-ce=5:19.03.11~3-0~ubuntu-{{ ansible_distribution_release|lower }} - 'stable': docker-ce=5:19.03.11~3-0~ubuntu-{{ ansible_distribution_release|lower }} - 'edge': docker-ce=5:19.03.11~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '19.03': docker-ce=5:19.03.12~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'stable': docker-ce=5:19.03.12~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'edge': docker-ce=5:19.03.12~3-0~ubuntu-{{ ansible_distribution_release|lower }} docker_cli_versioned_pkg: 'latest': docker-ce-cli '18.09': docker-ce-cli=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }} - '19.03': docker-ce-cli=5:19.03.11~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '19.03': docker-ce-cli=5:19.03.12~3-0~ubuntu-{{ ansible_distribution_release|lower }} docker_package_info: pkg_mgr: apt diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index b9ca5c68eac..1d58888cb3a 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -82,7 +82,7 @@ contiv_version: 1.2.1 cilium_version: "v1.8.1" kube_ovn_version: "v1.2.1" kube_router_version: "v1.0.0" -multus_version: "v3.4.2" +multus_version: "v3.6" # Get kubernetes major version (i.e. 1.17.4 => 1.17) kube_major_version: "{{ kube_version | regex_replace('^v([0-9])+\\.([0-9]+)\\.[0-9]+', 'v\\1.\\2') }}" diff --git a/roles/download/tasks/check_pull_required.yml b/roles/download/tasks/check_pull_required.yml index 9361b87c5ba..cc31a1423ed 100644 --- a/roles/download/tasks/check_pull_required.yml +++ b/roles/download/tasks/check_pull_required.yml @@ -4,7 +4,7 @@ # the template, just replace all instances of {{ `{{` }} with {{ and {{ '}}' }} with }}. # It will output something like the following: # nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc... 
-- name: check_pull_required | Generate a list of information about the images on a node +- name: check_pull_required | Generate a list of information about the images on a node # noqa 305 image_info_command contains a pipe, therefore requiring shell shell: "{{ image_info_command }}" no_log: true register: docker_images diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml index 234bf1f9557..7f015999f0d 100644 --- a/roles/download/tasks/download_container.yml +++ b/roles/download/tasks/download_container.yml @@ -64,7 +64,7 @@ - not image_is_cached - name: download_container | Save and compress image - shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" + shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" # noqa 305 image_save_command_on_localhost contains a pipe, therefore requires shell delegate_to: "{{ download_delegate }}" delegate_facts: no register: container_save_status @@ -104,7 +104,7 @@ - download_force_cache - name: download_container | Load image into docker - shell: "{{ image_load_command }}" + shell: "{{ image_load_command }}" # noqa 305 image_load_command uses pipes, therefore requires shell register: container_load_status failed_when: container_load_status is failed when: diff --git a/roles/download/tasks/prep_download.yml b/roles/download/tasks/prep_download.yml index 8e1d131ca5b..88eb414ab2b 100644 --- a/roles/download/tasks/prep_download.yml +++ b/roles/download/tasks/prep_download.yml @@ -33,7 +33,7 @@ - asserts - name: prep_download | On localhost, check if user has access to docker without using sudo - shell: "{{ image_info_command_on_localhost }}" + shell: "{{ image_info_command_on_localhost }}" # noqa 305 image_info_command_on_localhost contains pipe, therefore requires shell delegate_to: localhost connection: local run_once: true @@ -69,7 +69,7 @@ - asserts - name: prep_download | Register docker images info - shell: "{{ image_info_command }}" + shell: "{{ image_info_command }}" # noqa 305 image_info_command contains pipe therefore requires shell no_log: true register: docker_images failed_when: false diff --git a/roles/download/tasks/prep_kubeadm_images.yml b/roles/download/tasks/prep_kubeadm_images.yml index 411ef5b3ff3..c97c19e0b60 100644 --- a/roles/download/tasks/prep_kubeadm_images.yml +++ b/roles/download/tasks/prep_kubeadm_images.yml @@ -30,7 +30,7 @@ mode: "0755" state: file -- name: prep_kubeadm_images | Generate list of required images +- name: prep_kubeadm_images | Generate list of required images # noqa 306 shell: "{{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -v coredns" register: kubeadm_images_raw run_once: true diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml index 56d5f86c8b9..39df567f64c 100644 --- a/roles/etcd/tasks/configure.yml +++ b/roles/etcd/tasks/configure.yml @@ -1,5 +1,5 @@ --- -- name: Configure | Check if etcd cluster is healthy +- name: Configure | Check if etcd cluster is healthy # noqa 306 shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'" register: etcd_cluster_is_healthy failed_when: false @@ -16,7 +16,7 @@ ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" -- name: Configure | Check if etcd-events cluster is healthy +- name: Configure | Check if etcd-events cluster is healthy # 
noqa 306 shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'" register: etcd_events_cluster_is_healthy failed_when: false @@ -73,7 +73,7 @@ ignore_errors: "{{ etcd_events_cluster_is_healthy.rc == 0 }}" when: is_etcd_master and etcd_events_cluster_setup -- name: Configure | Wait for etcd cluster to be healthy +- name: Configure | Wait for etcd cluster to be healthy # noqa 306 shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'" register: etcd_cluster_is_healthy until: etcd_cluster_is_healthy.rc == 0 @@ -94,7 +94,7 @@ ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" -- name: Configure | Wait for etcd-events cluster to be healthy +- name: Configure | Wait for etcd-events cluster to be healthy # noqa 306 shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'" register: etcd_events_cluster_is_healthy until: etcd_events_cluster_is_healthy.rc == 0 diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml index e25f13c2f09..7c71e7afee7 100644 --- a/roles/etcd/tasks/gen_certs_script.yml +++ b/roles/etcd/tasks/gen_certs_script.yml @@ -139,7 +139,7 @@ inventory_hostname in groups['k8s-cluster']) and sync_certs|default(false) and inventory_hostname not in groups['etcd'] -- name: Gen_certs | Copy certs on nodes +- name: Gen_certs | Copy certs on nodes # noqa 306 shell: "base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}" args: executable: /bin/bash diff --git a/roles/etcd/tasks/join_etcd-events_member.yml b/roles/etcd/tasks/join_etcd-events_member.yml index a6a197a7422..c4de329067c 100644 --- a/roles/etcd/tasks/join_etcd-events_member.yml +++ b/roles/etcd/tasks/join_etcd-events_member.yml @@ -1,5 +1,5 @@ --- -- name: Join Member | Add member to etcd-events cluster +- name: Join Member | Add member to etcd-events cluster # noqa 301 305 shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}" register: member_add_result until: member_add_result.rc == 0 @@ -24,7 +24,7 @@ {%- endif -%} {%- endfor -%} -- name: Join Member | Ensure member is in etcd-events cluster +- name: Join Member | Ensure member is in etcd-events cluster # noqa 306 shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_events_access_address }}" register: etcd_events_member_in_cluster changed_when: false diff --git a/roles/etcd/tasks/join_etcd_member.yml b/roles/etcd/tasks/join_etcd_member.yml index e7ee2a348f6..24a800bef1a 100644 --- a/roles/etcd/tasks/join_etcd_member.yml +++ b/roles/etcd/tasks/join_etcd_member.yml @@ -1,5 +1,5 @@ --- -- name: Join Member | Add member to etcd cluster +- name: Join Member | Add member to etcd cluster # noqa 301 305 shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_peer_url }}" register: member_add_result until: member_add_result.rc == 0 @@ -24,7 +24,7 @@ {%- endif -%} {%- endfor -%} -- name: Join Member | Ensure member is in etcd cluster +- name: Join Member | Ensure member is in etcd cluster # noqa 306 shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}" register: etcd_member_in_cluster changed_when: false diff --git a/roles/etcd/tasks/upd_ca_trust.yml b/roles/etcd/tasks/upd_ca_trust.yml index 
1f9da04f260..d9b4d5ef8aa 100644 --- a/roles/etcd/tasks/upd_ca_trust.yml +++ b/roles/etcd/tasks/upd_ca_trust.yml @@ -23,14 +23,14 @@ remote_src: true register: etcd_ca_cert -- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS) +- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS) # noqa 503 command: update-ca-certificates when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Coreos", "Container Linux by CoreOS", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse"] -- name: Gen_certs | update ca-certificates (RedHat) +- name: Gen_certs | update ca-certificates (RedHat) # noqa 503 command: update-ca-trust extract when: etcd_ca_cert.changed and ansible_os_family == "RedHat" -- name: Gen_certs | update ca-certificates (ClearLinux) +- name: Gen_certs | update ca-certificates (ClearLinux) # noqa 503 command: clrtrust add "{{ ca_cert_path }}" when: etcd_ca_cert.changed and ansible_os_family == "ClearLinux" diff --git a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin-rbac.yml.j2 b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin-rbac.yml.j2 index 4dbaf4e17c3..c0c92de0e57 100644 --- a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin-rbac.yml.j2 +++ b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin-rbac.yml.j2 @@ -8,7 +8,7 @@ metadata: namespace: kube-system --- -# external attacher +# external attacher kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -16,19 +16,16 @@ metadata: rules: - apiGroups: [""] resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "patch"] + verbs: ["get", "list", "watch", "update", "patch"] - apiGroups: [""] resources: ["nodes"] verbs: ["get", "list", "watch"] - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "patch"] + verbs: ["get", "list", "watch", "update", "patch"] - apiGroups: ["storage.k8s.io"] resources: ["csinodes"] verbs: ["get", "list", "watch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments/status"] - verbs: ["patch"] --- @@ -209,4 +206,4 @@ subjects: roleRef: kind: Role name: external-resizer-cfg - apiGroup: rbac.authorization.k8s.io + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2 b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2 index f6fe0f6240f..fd76073ddfa 100644 --- a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2 +++ b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2 @@ -23,8 +23,8 @@ spec: image: {{ csi_attacher_image_repo }}:{{ csi_attacher_image_tag }} imagePullPolicy: {{ k8s_image_pull_policy }} args: - - "--v=5" - "--csi-address=$(ADDRESS)" + - "--timeout=3m" {% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %} - --leader-election - --leader-election-namespace=kube-system @@ -40,6 +40,7 @@ spec: imagePullPolicy: {{ k8s_image_pull_policy }} args: - "--csi-address=$(ADDRESS)" + - "--timeout=3m" {% if cinder_topology is defined and cinder_topology %} - --feature-gates=Topology=true {% endif %} diff --git a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-driver.yml.j2 b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-driver.yml.j2 index 
2f9f8a10adc..5b681e4c4dd 100644 --- a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-driver.yml.j2 +++ b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-driver.yml.j2 @@ -5,3 +5,6 @@ metadata: spec: attachRequired: true podInfoOnMount: true + volumeLifecycleModes: + - Persistent + - Ephemeral diff --git a/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml b/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml index 860ca0e3ae1..4a3ebff4d24 100644 --- a/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml +++ b/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml @@ -32,7 +32,7 @@ register: helmcert_master run_once: true -- name: Gen_helm_tiller_certs | run cert generation script +- name: Gen_helm_tiller_certs | run cert generation script # noqa 301 run_once: yes delegate_to: "{{ groups['kube-master'][0] }}" command: "{{ helm_script_dir }}/helm-make-ssl.sh -e {{ helm_home_dir }} -d {{ helm_tiller_cert_dir }}" @@ -57,7 +57,7 @@ with_items: - "{{ helm_client_certs }}" -- name: Gen_helm_tiller_certs | Gather helm client certs +- name: Gen_helm_tiller_certs | Gather helm client certs # noqa 306 # noqa 303 - tar is called intentionally here, but maybe this should be done with the slurp module shell: "tar cfz - -C {{ helm_home_dir }} {{ helm_client_certs|join(' ') }} | base64 --wrap=0" args: @@ -85,7 +85,7 @@ mode: "0600" when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0] -- name: Gen_helm_tiller_certs | Unpack helm certs on masters +- name: Gen_helm_tiller_certs | Unpack helm certs on masters # noqa 306 shell: "base64 -d < {{ helm_cert_tempfile.path }} | tar xz -C {{ helm_home_dir }}" no_log: true changed_when: false diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml index a830f563d49..5887ce3c895 100644 --- a/roles/kubernetes-apps/helm/tasks/main.yml +++ b/roles/kubernetes-apps/helm/tasks/main.yml @@ -52,7 +52,7 @@ - helm_version is version('v3.0.0', '<') # FIXME: https://github.com/helm/helm/issues/6374 -- name: Helm | Install/upgrade helm +- name: Helm | Install/upgrade helm # noqa 306 shell: > {{ bin_dir }}/helm init --tiller-namespace={{ tiller_namespace }} {% if helm_skip_refresh %} --skip-refresh{% endif %} @@ -78,7 +78,7 @@ environment: "{{ proxy_env }}" # FIXME: https://github.com/helm/helm/issues/4063 -- name: Helm | Force apply tiller overrides if necessary +- name: Helm | Force apply tiller overrides if necessary # noqa 306 shell: > {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace={{ tiller_namespace }} {% if helm_skip_refresh %} --skip-refresh{% endif %} @@ -108,7 +108,7 @@ - helm_version is version('v3.0.0', '>=') - helm_stable_repo_url is defined -- name: Make sure bash_completion.d folder exists +- name: Make sure bash_completion.d folder exists # noqa 503 file: name: "/etc/bash_completion.d/" state: directory @@ -116,7 +116,7 @@ - ((helm_container is defined and helm_container.changed) or (helm_task_result is defined and helm_task_result.changed)) - ansible_os_family in ["ClearLinux"] -- name: Helm | Set up bash completion +- name: Helm | Set up bash completion # noqa 503 shell: "umask 022 && {{ bin_dir }}/helm completion bash >/etc/bash_completion.d/helm.sh" when: - ((helm_container is defined and helm_container.changed) or (helm_task_result is defined and helm_task_result.changed)) diff --git a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml 
b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml index 9528aa02dc0..af902c11cb0 100644 --- a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: "calico upgrade complete" - shell: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml" + command: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml" when: - inventory_hostname == groups['kube-master'][0] - calico_upgrade_enabled|default(True) diff --git a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml index 347d1b4c2d2..e9de24b5277 100644 --- a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml +++ b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: Rotate Tokens | Get default token name +- name: Rotate Tokens | Get default token name # noqa 306 shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets -o custom-columns=name:{.metadata.name} --no-headers | grep -m1 default-token" register: default_token changed_when: false @@ -29,7 +29,7 @@ # FIXME(mattymo): Exclude built in secrets that were automatically rotated, # instead of filtering manually -- name: Rotate Tokens | Get all serviceaccount tokens to expire +- name: Rotate Tokens | Get all serviceaccount tokens to expire # noqa 306 shell: >- {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets --all-namespaces -o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}' diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml index bbb1ce0e029..2baeadf234e 100644 --- a/roles/kubernetes/client/tasks/main.yml +++ b/roles/kubernetes/client/tasks/main.yml @@ -48,7 +48,7 @@ timeout: 180 # NOTE(mattymo): Please forgive this workaround -- name: Generate admin kubeconfig with external api endpoint +- name: Generate admin kubeconfig with external api endpoint # noqa 302 shell: >- mkdir -p {{ kube_config_dir }}/external_kubeconfig && {{ bin_dir }}/kubeadm diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml index bf2c2687972..75435095abd 100644 --- a/roles/kubernetes/kubeadm/tasks/main.yml +++ b/roles/kubernetes/kubeadm/tasks/main.yml @@ -22,7 +22,7 @@ delegate_to: "{{ groups['kube-master'][0] }}" run_once: true -- name: Calculate kubeadm CA cert hash +- name: Calculate kubeadm CA cert hash # noqa 306 shell: openssl x509 -pubkey -in {{ kube_cert_dir }}/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //' register: kubeadm_ca_hash when: @@ -107,7 +107,7 @@ # FIXME(mattymo): Need to point to localhost, otherwise masters will all point # incorrectly to first master, creating SPoF. 
-- name: Update server field in kube-proxy kubeconfig +- name: Update server field in kube-proxy kubeconfig # noqa 306 shell: >- {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get configmap kube-proxy -n kube-system -o yaml | sed 's#server:.*#server: https://127.0.0.1:{{ kube_apiserver_port }}#g' @@ -132,7 +132,7 @@ mode: "0644" - name: Restart all kube-proxy pods to ensure that they load the new configmap - shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0" + command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0" run_once: true delegate_to: "{{ groups['kube-master']|first }}" delegate_facts: false @@ -158,7 +158,7 @@ # FIXME(jjo): need to post-remove kube-proxy until https://github.com/kubernetes/kubeadm/issues/776 # is fixed - name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services - shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy" + command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy" run_once: true delegate_to: "{{ groups['kube-master']|first }}" when: diff --git a/roles/kubernetes/master/handlers/main.yml b/roles/kubernetes/master/handlers/main.yml index 4c5eba4dfa4..0c4b2a9ddb6 100644 --- a/roles/kubernetes/master/handlers/main.yml +++ b/roles/kubernetes/master/handlers/main.yml @@ -93,7 +93,8 @@ - name: Master | wait for kube-scheduler uri: - url: http://localhost:10251/healthz + url: https://localhost:10259/healthz + validate_certs: no register: scheduler_result until: scheduler_result.status == 200 retries: 60 @@ -101,7 +102,8 @@ - name: Master | wait for kube-controller-manager uri: - url: http://localhost:10252/healthz + url: https://localhost:10257/healthz + validate_certs: no register: controller_manager_result until: controller_manager_result.status == 200 retries: 60 @@ -111,8 +113,6 @@ uri: url: "{{ kube_apiserver_endpoint }}/healthz" validate_certs: no - client_cert: "{{ kube_apiserver_client_cert }}" - client_key: "{{ kube_apiserver_client_key }}" register: result until: result.status == 200 retries: 60 diff --git a/roles/kubernetes/master/tasks/kubeadm-setup.yml b/roles/kubernetes/master/tasks/kubeadm-setup.yml index 00f6c77b1d7..5dd29058829 100644 --- a/roles/kubernetes/master/tasks/kubeadm-setup.yml +++ b/roles/kubernetes/master/tasks/kubeadm-setup.yml @@ -47,7 +47,7 @@ when: - old_apiserver_cert.stat.exists -- name: kubeadm | Forcefully delete old static pods +- name: kubeadm | Forcefully delete old static pods # noqa 306 shell: "docker ps -f name=k8s_{{ item }} -q | xargs --no-run-if-empty docker rm -f" with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler"] when: diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml index d6ce320ba03..06c3eb5250f 100644 --- a/roles/kubernetes/master/tasks/pre-upgrade.yml +++ b/roles/kubernetes/master/tasks/pre-upgrade.yml @@ -8,7 +8,7 @@ register: kube_apiserver_manifest_replaced when: etcd_secret_changed|default(false) -- name: "Pre-upgrade | Delete master containers forcefully" +- name: "Pre-upgrade | Delete master containers forcefully" # noqa 306 503 shell: "docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f" with_items: - 
["kube-apiserver", "kube-controller-manager", "kube-scheduler"] diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index 21300e3adc4..b2e78e4c65b 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -46,7 +46,7 @@ - kube-proxy - name: Verify if br_netfilter module exists - shell: "modinfo br_netfilter" + command: "modinfo br_netfilter" environment: PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH's conservative path management register: modinfo_br_netfilter diff --git a/roles/kubernetes/node/tasks/pre_upgrade.yml b/roles/kubernetes/node/tasks/pre_upgrade.yml index 78a39567c15..918edfac5aa 100644 --- a/roles/kubernetes/node/tasks/pre_upgrade.yml +++ b/roles/kubernetes/node/tasks/pre_upgrade.yml @@ -1,5 +1,5 @@ --- -- name: "Pre-upgrade | check if kubelet container exists" +- name: "Pre-upgrade | check if kubelet container exists" # noqa 306 shell: >- {% if container_manager in ['crio', 'docker'] %} docker ps -af name=kubelet | grep kubelet diff --git a/roles/kubernetes/preinstall/handlers/main.yml b/roles/kubernetes/preinstall/handlers/main.yml index fd4cec362bb..097ba1d73b3 100644 --- a/roles/kubernetes/preinstall/handlers/main.yml +++ b/roles/kubernetes/preinstall/handlers/main.yml @@ -29,7 +29,7 @@ - Preinstall | reload kubelet when: is_fedora_coreos -- name: Preinstall | reload NetworkManager +- name: Preinstall | reload NetworkManager # noqa 303 command: systemctl restart NetworkManager.service when: is_fedora_coreos diff --git a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml index 987a4643a54..599289d90f6 100644 --- a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml +++ b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml @@ -158,7 +158,7 @@ when: - kube_network_plugin == 'calico' -- name: "Get current version of calico cluster version" +- name: "Get current version of calico cluster version" # noqa 306 shell: "{{ bin_dir }}/calicoctl.sh version | grep 'Cluster Version:' | awk '{ print $3}'" register: calico_version_on_server run_once: yes diff --git a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml index 0a4cd9ef3a5..784233c20b6 100644 --- a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml +++ b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml @@ -25,13 +25,13 @@ is_fedora_coreos: "{{ ostree.stat.exists and os_variant_coreos is not changed }}" - name: check resolvconf - shell: which resolvconf + command: which resolvconf register: resolvconf failed_when: false changed_when: false check_mode: no -- name: check systemd-resolved +- name: check systemd-resolved # noqa 303 command: systemctl is-active systemd-resolved register: systemd_resolved_enabled failed_when: false diff --git a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml index 44b99a5718d..d447c70e4c9 100644 --- a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml +++ b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml @@ -1,6 +1,6 @@ --- - name: Update package management cache (zypper) - SUSE - shell: zypper -n --gpg-auto-import-keys ref + command: zypper -n --gpg-auto-import-keys ref register: make_cache_output until: make_cache_output is succeeded retries: 4 diff --git a/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml 
b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml index b00c576eda8..69aa6518609 100644 --- a/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml +++ b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml @@ -20,12 +20,12 @@ changed_when: False register: fs_type -- name: run growpart +- name: run growpart # noqa 503 command: growpart /dev/sda 1 when: growpart_needed.changed environment: LC_ALL: C -- name: run xfs_growfs +- name: run xfs_growfs # noqa 503 command: xfs_growfs /dev/sda1 when: growpart_needed.changed and 'XFS' in fs_type.stdout diff --git a/roles/kubernetes/tokens/tasks/gen_tokens.yml b/roles/kubernetes/tokens/tasks/gen_tokens.yml index 9507a9323bd..ff0983bb329 100644 --- a/roles/kubernetes/tokens/tasks/gen_tokens.yml +++ b/roles/kubernetes/tokens/tasks/gen_tokens.yml @@ -35,14 +35,14 @@ when: gen_tokens|default(false) - name: Gen_tokens | Get list of tokens from first master - shell: "(find {{ kube_token_dir }} -maxdepth 1 -type f)" + command: "find {{ kube_token_dir }} -maxdepth 1 -type f" register: tokens_list check_mode: no delegate_to: "{{ groups['kube-master'][0] }}" run_once: true when: sync_tokens|default(false) -- name: Gen_tokens | Gather tokens +- name: Gen_tokens | Gather tokens # noqa 306 shell: "tar cfz - {{ tokens_list.stdout_lines | join(' ') }} | base64 --wrap=0" args: warn: false @@ -52,7 +52,7 @@ run_once: true when: sync_tokens|default(false) -- name: Gen_tokens | Copy tokens on masters +- name: Gen_tokens | Copy tokens on masters # noqa 306 shell: "echo '{{ tokens_data.stdout|quote }}' | base64 -d | tar xz -C /" when: - inventory_hostname in groups['kube-master'] diff --git a/roles/network_plugin/calico/rr/tasks/main.yml b/roles/network_plugin/calico/rr/tasks/main.yml index 5b80cf1ac41..6b19e31b5b5 100644 --- a/roles/network_plugin/calico/rr/tasks/main.yml +++ b/roles/network_plugin/calico/rr/tasks/main.yml @@ -4,6 +4,7 @@ - name: Calico-rr | Fetch current node object command: "{{ bin_dir }}/calicoctl.sh get node {{ inventory_hostname }} -ojson" + changed_when: false register: calico_rr_node until: calico_rr_node is succeeded delay: "{{ retry_stagger | random + 3 }}" @@ -15,12 +16,12 @@ {{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp': { 'routeReflectorClusterID': cluster_id }}}, recursive=True) }} -- name: Calico-rr | Configure route reflector +- name: Calico-rr | Configure route reflector # noqa 301 305 shell: "{{ bin_dir }}/calicoctl.sh replace -f-" args: stdin: "{{ calico_rr_node_patched | to_json }}" -- name: Calico-rr | Set label for route reflector +- name: Calico-rr | Set label for route reflector # noqa 301 command: >- {{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} 'i-am-a-route-reflector=true' --overwrite diff --git a/roles/network_plugin/calico/tasks/check.yml b/roles/network_plugin/calico/tasks/check.yml index 99888e21616..dc92912fc42 100644 --- a/roles/network_plugin/calico/tasks/check.yml +++ b/roles/network_plugin/calico/tasks/check.yml @@ -37,7 +37,7 @@ when: - "calico_vxlan_mode in ['Always', 'CrossSubnet']" -- name: "Get current version of calico cluster version" +- name: "Get current version of calico cluster version" # noqa 306 shell: "{{ bin_dir }}/calicoctl.sh version | grep 'Cluster Version:' | awk '{ print $3}'" register: calico_version_on_server run_once: yes diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml index 77aeba6ef6f..85a77f7e308 100644 --- a/roles/network_plugin/calico/tasks/install.yml 
+++ b/roles/network_plugin/calico/tasks/install.yml @@ -6,7 +6,7 @@ mode: 0755 remote_src: yes -- name: Calico | Check if host has NetworkManager +- name: Calico | Check if host has NetworkManager # noqa 303 command: systemctl show NetworkManager register: nm_check failed_when: false @@ -84,7 +84,7 @@ run_once: true when: calico_datastore == "etcd" -- name: Calico | Check if calico network pool has already been configured +- name: Calico | Check if calico network pool has already been configured # noqa 306 shell: > {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr | default(kube_pods_subnet) }}" | wc -l register: calico_conf @@ -131,7 +131,7 @@ loop_control: label: "{{ item.item.file }}" -- name: Calico | Configure calico network pool (version < v3.3.0) +- name: Calico | Configure calico network pool (version < v3.3.0) # noqa 306 shell: > echo " { "kind": "IPPool", @@ -149,7 +149,7 @@ - 'calico_conf.stdout == "0"' - calico_version is version("v3.3.0", "<") -- name: Calico | Configure calico network pool (version >= v3.3.0) +- name: Calico | Configure calico network pool (version >= v3.3.0) # noqa 306 shell: > echo " { "kind": "IPPool", @@ -176,7 +176,7 @@ - inventory_hostname in groups['k8s-cluster'] run_once: yes -- name: Calico | Set global as_num +- name: Calico | Set global as_num # noqa 306 shell: > echo ' { "kind": "BGPConfiguration", @@ -192,7 +192,7 @@ when: - inventory_hostname == groups['kube-master'][0] -- name: Calico | Configure peering with router(s) at global scope +- name: Calico | Configure peering with router(s) at global scope # noqa 306 shell: > echo '{ "apiVersion": "projectcalico.org/v3", @@ -214,7 +214,7 @@ - inventory_hostname == groups['kube-master'][0] - peer_with_router|default(false) -- name: Calico | Configure peering with route reflectors at global scope +- name: Calico | Configure peering with route reflectors at global scope # noqa 306 shell: | echo '{ "apiVersion": "projectcalico.org/v3", @@ -236,7 +236,7 @@ - inventory_hostname == groups['kube-master'][0] - peer_with_calico_rr|default(false) -- name: Calico | Configure route reflectors to peer with each other +- name: Calico | Configure route reflectors to peer with each other # noqa 306 shell: > echo '{ "apiVersion": "projectcalico.org/v3", @@ -309,7 +309,7 @@ - inventory_hostname not in groups['kube-master'] - calico_datastore == "kdd" -- name: Calico | Configure node asNumber for per node peering +- name: Calico | Configure node asNumber for per node peering # noqa 306 shell: > echo '{ "apiVersion": "projectcalico.org/v3", @@ -333,7 +333,7 @@ - local_as is defined - groups['calico-rr'] | default([]) | length == 0 -- name: Calico | Configure peering with router(s) at node scope +- name: Calico | Configure peering with router(s) at node scope # noqa 306 shell: > echo '{ "apiVersion": "projectcalico.org/v3", diff --git a/roles/network_plugin/calico/tasks/pre.yml b/roles/network_plugin/calico/tasks/pre.yml index e798142f315..aaae21bcdd7 100644 --- a/roles/network_plugin/calico/tasks/pre.yml +++ b/roles/network_plugin/calico/tasks/pre.yml @@ -1,5 +1,5 @@ --- -- name: Calico | Get kubelet hostname +- name: Calico | Get kubelet hostname # noqa 306 shell: >- {{ bin_dir }}/kubectl get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address' | egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1 diff --git a/roles/network_plugin/calico/tasks/reset.yml b/roles/network_plugin/calico/tasks/reset.yml index 1cdab126238..0d7a4bf72eb 
100644 --- a/roles/network_plugin/calico/tasks/reset.yml +++ b/roles/network_plugin/calico/tasks/reset.yml @@ -10,9 +10,10 @@ - name: reset | get remaining routes set by bird command: ip route show proto bird + changed_when: false register: bird_routes -- name: reset | remove remaining routes set by bird +- name: reset | remove remaining routes set by bird # noqa 301 command: "ip route del {{ bird_route }} proto bird" with_items: "{{ bird_routes.stdout_lines }}" loop_control: diff --git a/roles/network_plugin/calico/tasks/upgrade.yml b/roles/network_plugin/calico/tasks/upgrade.yml index a4b7cffd65c..0dceac84058 100644 --- a/roles/network_plugin/calico/tasks/upgrade.yml +++ b/roles/network_plugin/calico/tasks/upgrade.yml @@ -16,11 +16,11 @@ - "etcdv2" - "etcdv3" -- name: "Tests data migration (dry-run)" +- name: "Tests data migration (dry-run)" # noqa 301 305 shell: "{{ bin_dir }}/calico-upgrade dry-run --output-dir=/tmp --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml" register: calico_upgrade_test_data failed_when: '"Successfully" not in calico_upgrade_test_data.stdout' -- name: "If test migration is success continue with calico data real migration" +- name: "If test migration is success continue with calico data real migration" # noqa 301 305 shell: "{{ bin_dir }}/calico-upgrade start --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml --output-dir=/tmp/calico_upgrade" register: calico_upgrade_migration_data diff --git a/roles/network_plugin/contiv/tasks/pre-reset.yml b/roles/network_plugin/contiv/tasks/pre-reset.yml index a811d59213f..19f6a97983a 100644 --- a/roles/network_plugin/contiv/tasks/pre-reset.yml +++ b/roles/network_plugin/contiv/tasks/pre-reset.yml @@ -23,7 +23,7 @@ - name: reset | Copy contiv temporary cleanup script copy: - src: ../files/contiv-cleanup.sh # Not in role_path so we must trick... + src: ../files/contiv-cleanup.sh # noqa 404 Not in role_path so we must trick... dest: /opt/cni/bin/cleanup owner: root group: root @@ -33,7 +33,7 @@ - name: reset | Lay down contiv cleanup template template: - src: ../templates/contiv-cleanup.yml.j2 # Not in role_path so we must trick... + src: ../templates/contiv-cleanup.yml.j2 # noqa 404 Not in role_path so we must trick... 
dest: "{{ kube_config_dir }}/contiv-cleanup.yml" # kube_config_dir is used here as contiv_config_dir is not necessarily set at reset register: contiv_cleanup_manifest when: diff --git a/roles/network_plugin/kube-ovn/tasks/main.yml b/roles/network_plugin/kube-ovn/tasks/main.yml index 308b1c62521..c416f120aad 100644 --- a/roles/network_plugin/kube-ovn/tasks/main.yml +++ b/roles/network_plugin/kube-ovn/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Kube-OVN | Label ovn-db node - shell: >- + command: >- {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube-master'] | first }} kube-ovn/role=master when: - inventory_hostname == groups['kube-master'][0] diff --git a/roles/network_plugin/macvlan/tasks/main.yml b/roles/network_plugin/macvlan/tasks/main.yml index 751c3471623..d965bfa96c5 100644 --- a/roles/network_plugin/macvlan/tasks/main.yml +++ b/roles/network_plugin/macvlan/tasks/main.yml @@ -1,6 +1,7 @@ --- - name: Macvlan | Retrieve Pod Cidr command: "{{ bin_dir }}/kubectl get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'" + changed_when: false register: node_pod_cidr_cmd delegate_to: "{{ groups['kube-master'][0] }}" @@ -11,6 +12,7 @@ - name: Macvlan | Retrieve default gateway network interface become: false raw: ip -4 route list 0/0 | sed 's/.*dev \([[:alnum:]]*\).*/\1/' + changed_when: false register: node_default_gateway_interface_cmd - name: Macvlan | set node_default_gateway_interface diff --git a/roles/network_plugin/multus/defaults/main.yml b/roles/network_plugin/multus/defaults/main.yml index 768efbf8d22..cbeb4cb323e 100644 --- a/roles/network_plugin/multus/defaults/main.yml +++ b/roles/network_plugin/multus/defaults/main.yml @@ -1,7 +1,7 @@ --- multus_conf_file: "auto" multus_cni_conf_dir_host: "/etc/cni/net.d" -multus_cni_bin_dir_host: "{{ '/usr/libexec/cni' if container_manager == 'crio' else '/opt/cni/bin' }}" +multus_cni_bin_dir_host: "/opt/cni/bin" multus_cni_run_dir_host: "/run" multus_cni_conf_dir: "{{ ('/host', multus_cni_conf_dir_host) | join }}" multus_cni_bin_dir: "{{ ('/host', multus_cni_bin_dir_host) | join }}" diff --git a/roles/recover_control_plane/etcd/tasks/main.yml b/roles/recover_control_plane/etcd/tasks/main.yml index 64cac81dad7..ac32922839a 100644 --- a/roles/recover_control_plane/etcd/tasks/main.yml +++ b/roles/recover_control_plane/etcd/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Get etcd endpoint health - shell: "{{ bin_dir }}/etcdctl endpoint health" + command: "{{ bin_dir }}/etcdctl endpoint health" register: etcd_endpoint_health ignore_errors: true changed_when: false @@ -58,7 +58,7 @@ - "item.rc != 0 and not 'No such file or directory' in item.stderr" - name: Get etcd cluster members - shell: "{{ bin_dir }}/etcdctl member list" + command: "{{ bin_dir }}/etcdctl member list" register: member_list changed_when: false check_mode: no @@ -74,7 +74,7 @@ - has_quorum - name: Remove broken cluster members - shell: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}" + command: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}" environment: ETCDCTL_API: 3 ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" diff --git a/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml b/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml index dc101180584..bef89f192bb 100644 --- a/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml +++ b/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml @@ -1,6 +1,6 @@ --- - name: Save etcd 
snapshot - shell: "{{ bin_dir }}/etcdctl snapshot save /tmp/snapshot.db" + command: "{{ bin_dir }}/etcdctl snapshot save /tmp/snapshot.db" environment: - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" - ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" @@ -25,7 +25,7 @@ path: "{{ etcd_data_dir }}" state: absent -- name: Restore etcd snapshot +- name: Restore etcd snapshot # noqa 301 305 shell: "{{ bin_dir }}/etcdctl snapshot restore /tmp/snapshot.db --name {{ etcd_member_name }} --initial-cluster {{ etcd_member_name }}={{ etcd_peer_url }} --initial-cluster-token k8s_etcd --initial-advertise-peer-urls {{ etcd_peer_url }} --data-dir {{ etcd_data_dir }}" environment: - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" diff --git a/roles/recover_control_plane/master/tasks/main.yml b/roles/recover_control_plane/master/tasks/main.yml index 71a0941682b..5f4b6a922a1 100644 --- a/roles/recover_control_plane/master/tasks/main.yml +++ b/roles/recover_control_plane/master/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Wait for apiserver - shell: "{{ bin_dir }}/kubectl get nodes" + command: "{{ bin_dir }}/kubectl get nodes" environment: - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config" register: apiserver_is_ready @@ -11,7 +11,7 @@ when: groups['broken_kube-master'] - name: Delete broken kube-master nodes from cluster - shell: "{{ bin_dir }}/kubectl delete node {{ item }}" + command: "{{ bin_dir }}/kubectl delete node {{ item }}" environment: - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config" with_items: "{{ groups['broken_kube-master'] }}" diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml index 37aac0df239..c4660ef87c7 100644 --- a/roles/remove-node/post-remove/tasks/main.yml +++ b/roles/remove-node/post-remove/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: Delete node +- name: Delete node # noqa 301 command: "{{ bin_dir }}/kubectl delete node {{ kube_override_hostname|default(inventory_hostname) }}" delegate_to: "{{ groups['kube-master']|first }}" ignore_errors: yes \ No newline at end of file diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml index f287aa3dd95..32421c1a38d 100644 --- a/roles/remove-node/pre-remove/tasks/main.yml +++ b/roles/remove-node/pre-remove/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: cordon-node | Mark all nodes as unschedulable before drain +- name: cordon-node | Mark all nodes as unschedulable before drain # noqa 301 command: >- {{ bin_dir }}/kubectl cordon {{ hostvars[item]['kube_override_hostname']|default(item) }} with_items: @@ -9,7 +9,7 @@ run_once: true ignore_errors: yes -- name: remove-node | Drain node except daemonsets resource +- name: remove-node | Drain node except daemonsets resource # noqa 301 command: >- {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf drain --force diff --git a/roles/remove-node/remove-etcd-node/tasks/main.yml b/roles/remove-node/remove-etcd-node/tasks/main.yml index 1b126e65815..0fc0f7fb4d8 100644 --- a/roles/remove-node/remove-etcd-node/tasks/main.yml +++ b/roles/remove-node/remove-etcd-node/tasks/main.yml @@ -18,7 +18,7 @@ - inventory_hostname in groups['etcd'] - name: Lookup etcd member id - shell: "{{ bin_dir }}/etcdctl member list | grep {{ node_ip }} | cut -d: -f1" + shell: "{{ bin_dir }}/etcdctl member list | grep {{ node_ip }} | cut -d, -f1" register: etcd_member_id ignore_errors: true changed_when: false @@ -35,7 
+35,7 @@ when: inventory_hostname in groups['etcd'] - name: Remove etcd member from cluster - shell: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}" + command: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}" register: etcd_member_in_cluster changed_when: false check_mode: no diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 5fd98fd6f91..bbc76eebe00 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -41,12 +41,12 @@ tags: - docker -- name: reset | systemctl daemon-reload +- name: reset | systemctl daemon-reload # noqa 503 systemd: daemon_reload: true when: services_removed.changed or docker_dropins_removed.changed -- name: reset | remove all containers +- name: reset | remove all containers # noqa 306 shell: "{{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv" register: remove_all_containers retries: 4 @@ -56,7 +56,7 @@ tags: - docker -- name: reset | restart docker if needed +- name: reset | restart docker if needed # noqa 503 service: name: docker state: restarted @@ -64,7 +64,7 @@ tags: - docker -- name: reset | stop all cri containers +- name: reset | stop all cri containers # noqa 306 shell: "crictl ps -aq | xargs -r crictl -t 60s stop" register: remove_all_cri_containers retries: 5 @@ -75,7 +75,7 @@ - containerd when: container_manager in ["crio", "containerd"] -- name: reset | remove all cri containers +- name: reset | remove all cri containers # noqa 306 shell: "crictl ps -aq | xargs -r crictl -t 60s rm" register: remove_all_cri_containers retries: 5 @@ -86,7 +86,7 @@ - containerd when: container_manager in ["crio", "containerd"] and deploy_container_engine|default(true) -- name: reset | stop all cri pods +- name: reset | stop all cri pods # noqa 306 shell: "crictl pods -q | xargs -r crictl -t 60s stopp" register: remove_all_cri_containers retries: 5 @@ -97,7 +97,7 @@ - containerd when: container_manager in ["crio", "containerd"] -- name: reset | remove all cri pods +- name: reset | remove all cri pods # noqa 306 shell: "crictl pods -q | xargs -r crictl -t 60s rmp" register: remove_all_cri_containers retries: 5 @@ -130,7 +130,7 @@ tags: - services -- name: reset | gather mounted kubelet dirs +- name: reset | gather mounted kubelet dirs # noqa 306 301 shell: mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac args: warn: false @@ -139,7 +139,7 @@ tags: - mounts -- name: reset | unmount kubelet dirs +- name: reset | unmount kubelet dirs # noqa 301 command: umount -f {{ item }} with_items: "{{ mounted_dirs.stdout_lines }}" register: umount_dir @@ -162,7 +162,7 @@ - iptables - name: Clear IPVS virtual server table - shell: "ipvsadm -C" + command: "ipvsadm -C" when: - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s-cluster'] diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml index 7524e349071..8fd3e5c032c 100644 --- a/scripts/collect-info.yaml +++ b/scripts/collect-info.yaml @@ -112,7 +112,7 @@ {%- endfor %} when: "'etcd' in groups" - - name: Storing commands output + - name: Storing commands output # noqa 306 shell: "{{ item.cmd }} 2>&1 | tee {{ item.name }}" failed_when: false with_items: "{{ commands }}" diff --git a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml index aae85e4092f..08f26694af5 100644 --- a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml +++ b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml @@ -34,7 +34,7 @@ when: - 
item.value.converted|bool -- name: Resize images +- name: Resize images # noqa 301 command: qemu-img resize {{ images_dir }}/{{ item.key }}.qcow2 +8G with_dict: - "{{ images }}" @@ -45,15 +45,15 @@ src: Dockerfile dest: "{{ images_dir }}/Dockerfile" -- name: Create docker images for each OS +- name: Create docker images for each OS # noqa 301 command: docker build -t {{ registry }}/vm-{{ item.key }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }} with_dict: - "{{ images }}" -- name: docker login +- name: docker login # noqa 301 command: docker login -u="{{ docker_user }}" -p="{{ docker_password }}" "{{ docker_host }}" -- name: docker push image +- name: docker push image # noqa 301 command: docker push {{ registry }}/vm-{{ item.key }}:latest with_dict: - "{{ images }}" diff --git a/tests/cloud_playbooks/create-aws.yml b/tests/cloud_playbooks/create-aws.yml index eb33d9838ad..a1982edfabc 100644 --- a/tests/cloud_playbooks/create-aws.yml +++ b/tests/cloud_playbooks/create-aws.yml @@ -20,5 +20,5 @@ - name: Template the inventory template: - src: ../templates/inventory-aws.j2 + src: ../templates/inventory-aws.j2 # noqa 404 CI inventory templates are not in role_path dest: "{{ inventory_path }}" diff --git a/tests/cloud_playbooks/create-do.yml b/tests/cloud_playbooks/create-do.yml index 37fbafbd610..4fa4c33ca54 100644 --- a/tests/cloud_playbooks/create-do.yml +++ b/tests/cloud_playbooks/create-do.yml @@ -88,6 +88,6 @@ - name: Template the inventory template: - src: ../templates/inventory-do.j2 + src: ../templates/inventory-do.j2 # noqa 404 CI templates are not in role_path dest: "{{ inventory_path }}" when: state == 'present' diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml index 2664810799b..f9f474f83ee 100644 --- a/tests/cloud_playbooks/create-gce.yml +++ b/tests/cloud_playbooks/create-gce.yml @@ -49,7 +49,7 @@ add_host: hostname={{ item.public_ip }} groupname="waitfor_hosts" with_items: '{{ gce.instance_data }}' - - name: Template the inventory + - name: Template the inventory # noqa 404 CI inventory templates are not in role_path template: src: ../templates/inventory-gce.j2 dest: "{{ inventory_path }}" @@ -60,7 +60,7 @@ state: directory when: mode in ['scale', 'separate-scale', 'ha-scale'] - - name: Template fake hosts group vars + - name: Template fake hosts group vars # noqa 404 CI templates are not in role_path template: src: ../templates/fake_hosts.yml.j2 dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml" diff --git a/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml b/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml index 939d432a64d..a3932957300 100644 --- a/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml +++ b/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml @@ -29,8 +29,9 @@ loop_control: index_var: vm_id -- name: Wait for vms to have ipaddress assigned +- name: Wait for vms to have ipaddress assigned # noqa 306 shell: "kubectl get vmis -n {{ test_name }} instance-{{ vm_id }} -o json | jq '.status.interfaces[].ipAddress' | tr -d '\"'" + changed_when: false register: vm_ips loop: "{{ range(1, vm_count|int + 1, 1) | list }}" loop_control: diff --git a/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml b/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml index dc66e2db7c4..5cde2e7b027 100644 --- a/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml +++ b/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml @@ -17,7 +17,7 @@ name: "{{ test_name }}" - 
name: Wait for namespace {{ test_name }} to be fully deleted - shell: kubectl get ns {{ test_name }} + command: kubectl get ns {{ test_name }} register: delete_namespace failed_when: - delete_namespace.rc == 0 diff --git a/tests/cloud_playbooks/upload-logs-gcs.yml b/tests/cloud_playbooks/upload-logs-gcs.yml index f1e3cbaca52..5ebb6f578c6 100644 --- a/tests/cloud_playbooks/upload-logs-gcs.yml +++ b/tests/cloud_playbooks/upload-logs-gcs.yml @@ -9,6 +9,7 @@ tasks: - name: Generate uniq bucket name prefix raw: date +%Y%m%d + changed_when: false register: out - name: replace_test_id @@ -52,7 +53,7 @@ no_log: True failed_when: false - - name: Apply the lifecycle rules + - name: Apply the lifecycle rules # noqa 301 command: "{{ dir }}/google-cloud-sdk/bin/gsutil lifecycle set {{ dir }}/gcs_life.json gs://{{ test_name }}" environment: BOTO_CONFIG: "{{ dir }}/.boto" diff --git a/tests/testcases/015_check-nodes-ready.yml b/tests/testcases/015_check-nodes-ready.yml index be8370cc3b3..7d2547574be 100644 --- a/tests/testcases/015_check-nodes-ready.yml +++ b/tests/testcases/015_check-nodes-ready.yml @@ -15,16 +15,18 @@ - import_role: name: cluster-dump - - name: Check kubectl output + - name: Check kubectl output # noqa 305 shell: "{{ bin_dir }}/kubectl get nodes" + changed_when: false register: get_nodes no_log: true - debug: msg: "{{ get_nodes.stdout.split('\n') }}" - - name: Check that all nodes are running and ready + - name: Check that all nodes are running and ready # noqa 305 shell: "{{ bin_dir }}/kubectl get nodes --no-headers -o yaml" + changed_when: false register: get_nodes_yaml until: # Check that all nodes are Status=Ready diff --git a/tests/testcases/020_check-pods-running.yml b/tests/testcases/020_check-pods-running.yml index 9679be5fc74..33e0fa368df 100644 --- a/tests/testcases/020_check-pods-running.yml +++ b/tests/testcases/020_check-pods-running.yml @@ -15,16 +15,18 @@ - import_role: name: cluster-dump - - name: Check kubectl output + - name: Check kubectl output # noqa 305 shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide" + changed_when: false register: get_pods no_log: true - debug: msg: "{{ get_pods.stdout.split('\n') }}" - - name: Check that all pods are running and ready + - name: Check that all pods are running and ready # noqa 305 shell: "{{ bin_dir }}/kubectl get pods --all-namespaces --no-headers -o yaml" + changed_when: false register: run_pods_log until: # Check that all pods are running @@ -36,8 +38,9 @@ failed_when: false no_log: true - - name: Check kubectl output + - name: Check kubectl output # noqa 305 shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide" + changed_when: false register: get_pods no_log: true diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml index bee470ef753..c2dd62d9077 100644 --- a/tests/testcases/030_check-network.yml +++ b/tests/testcases/030_check-network.yml @@ -15,11 +15,12 @@ bin_dir: "/usr/local/bin" when: not ansible_os_family in ["CoreOS", "Coreos", "Container Linux by CoreOS", "Flatcar", "Flatcar Container Linux by Kinvolk"] - - name: Create test namespace + - name: Create test namespace # noqa 301 305 shell: "{{ bin_dir }}/kubectl create namespace test" - - name: Run 2 busybox pods in test ns + - name: Run 2 busybox pods in test ns # noqa 305 shell: "{{ bin_dir }}/kubectl run {{ item }} --image={{ test_image_repo }}:{{ test_image_tag }} --namespace test --command -- tail -f /dev/null" + changed_when: false loop: - busybox1 - busybox2 @@ -27,8 +28,9 @@ - import_role: name: 
cluster-dump - - name: Check that all pods are running and ready + - name: Check that all pods are running and ready # noqa 305 shell: "{{ bin_dir }}/kubectl get pods --namespace test --no-headers -o yaml" + changed_when: false register: run_pods_log until: # Check that all pods are running @@ -40,8 +42,9 @@ failed_when: false no_log: true - - name: Get pod names + - name: Get pod names # noqa 305 shell: "{{ bin_dir }}/kubectl get pods -n test -o json" + changed_when: false register: pods no_log: true @@ -52,17 +55,20 @@ - name: Get hostnet pods command: "{{ bin_dir }}/kubectl get pods -n test -o jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'" + changed_when: false register: hostnet_pods no_log: true - name: Get running pods command: "{{ bin_dir }}/kubectl get pods -n test -o jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'" + changed_when: False register: running_pods no_log: true - - name: Check kubectl output + - name: Check kubectl output # noqa 305 shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide" + changed_when: False register: get_pods no_log: true @@ -90,7 +96,7 @@ with_items: "{{ pod_ips }}" - name: Ping between pods is working - shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}" + command: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}" when: - not item[0] in pods_hostnet - not item[1] in pods_hostnet @@ -99,7 +105,7 @@ - "{{ pod_ips }}" - name: Ping between hostnet pods is working - shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}" + command: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}" when: - item[0] in pods_hostnet - item[1] in pods_hostnet diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml index e6ea13a24b1..9cc38cfdb5b 100644 --- a/tests/testcases/040_check-network-adv.yml +++ b/tests/testcases/040_check-network-adv.yml @@ -1,7 +1,7 @@ --- - hosts: kube-node tasks: - - name: Test tunl0 routes + - name: Test tunl0 routes # noqa 306 shell: "! 
/sbin/ip ro | grep '/26 via' | grep -v tunl0" when: - (ipip|default(true) or cloud_provider is defined) @@ -15,7 +15,7 @@ tasks: - name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282) - shell: "ethtool --offload flannel.1 rx off tx off" + command: "ethtool --offload flannel.1 rx off tx off" ignore_errors: true when: - kube_network_plugin|default('calico') == 'flannel' @@ -33,7 +33,7 @@ - import_role: name: cluster-dump - - name: Wait for netchecker server + - name: Wait for netchecker server # noqa 306 shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep ^netchecker-server" register: ncs_pod until: ncs_pod.stdout.find('Running') != -1 @@ -41,7 +41,7 @@ delay: 10 when: inventory_hostname == groups['kube-master'][0] - - name: Wait for netchecker agents + - name: Wait for netchecker agents # noqa 306 shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep '^netchecker-agent-.*Running'" register: nca_pod until: nca_pod.stdout_lines|length >= groups['k8s-cluster']|intersect(ansible_play_hosts)|length * 2 @@ -215,7 +215,7 @@ - kube_network_plugin_multus|default(false)|bool - name: Check secondary macvlan interface - shell: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1" + command: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1" register: output until: output.rc == 0 retries: 90 diff --git a/tests/testcases/roles/cluster-dump/tasks/main.yml b/tests/testcases/roles/cluster-dump/tasks/main.yml index e1d5d35a6bc..589a712e0ec 100644 --- a/tests/testcases/roles/cluster-dump/tasks/main.yml +++ b/tests/testcases/roles/cluster-dump/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Generate dump folder - shell: "{{ bin_dir }}/kubectl cluster-info dump --all-namespaces --output-directory /tmp/cluster-dump" + command: "{{ bin_dir }}/kubectl cluster-info dump --all-namespaces --output-directory /tmp/cluster-dump" no_log: true when: inventory_hostname in groups['kube-master']
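# A minimal sketch of the two per-task patterns this patch applies, assuming
# ansible-lint 4.x rule numbering; both tasks below are illustrative and not
# taken from the repository.
# Preferred fix for rule 301: mark read-only commands as never "changed".
- name: Read the current kubeadm ClusterConfiguration  # hypothetical task
  command: cat /etc/kubernetes/kubeadm-config.yaml
  register: kubeadm_config_raw
  changed_when: false

# Fallback: keep the rule enabled globally and silence it inline with a reason.
- name: Delete a finished helper pod  # noqa 301 cleanup is intentionally one-shot
  command: kubectl delete pod helper-pod --ignore-not-found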
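# Rule 306 flags shell pipelines that run without pipefail; the patch keeps such
# pipelines and tags them with "# noqa 306". A sketch of the alternative fix,
# assuming bash is available on the target host (illustrative task, reusing the
# bin_dir and netcheck_namespace variables from the tests above):
- name: Wait for netchecker server with pipefail enabled
  shell: |
    set -o pipefail
    {{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep ^netchecker-server
  args:
    executable: /bin/bash
  register: ncs_pod
  until: ncs_pod.stdout.find('Running') != -1
  retries: 3
  delay: 10
  changed_when: false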
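# The master handler changes above switch the kube-scheduler and
# kube-controller-manager health checks from the insecure ports (10251/10252)
# to the secure ones (10259/10257), which serve a self-signed certificate,
# hence validate_certs: no. A standalone probe of the same endpoints could look
# like this (sketch, hypothetical task name):
- name: Probe control plane health endpoints
  uri:
    url: "https://localhost:{{ item }}/healthz"
    validate_certs: no
  register: healthz_result
  until: healthz_result.status == 200
  retries: 6
  delay: 5
  loop:
    - 10259  # kube-scheduler
    - 10257  # kube-controller-manager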
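# Rule 503 ("tasks that run when changed should likely be handlers") is silenced
# in a few hunks above (growpart, daemon-reload, docker restart). A sketch of
# the handler-based layout that would avoid the noqa entirely; the play, file
# path and handler name are illustrative, not from the repository:
- hosts: k8s-cluster
  tasks:
    - name: Remove an obsolete kubelet drop-in
      file:
        path: /etc/systemd/system/kubelet.service.d/obsolete.conf
        state: absent
      notify: Reload systemd

  handlers:
    - name: Reload systemd
      systemd:
        daemon_reload: true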