diff --git a/.dockerignore b/.dockerignore index 5e7ebff64e..ab75c635fe 100644 --- a/.dockerignore +++ b/.dockerignore @@ -7,4 +7,5 @@ test/fixtures/*/.terraform test/fixtures/*/terraform.tfstate.d examples/.kitchen examples/*/.terraform -examples/*/terraform.tfstate.d \ No newline at end of file +examples/*/terraform.tfstate.d + diff --git a/.kitchen.yml b/.kitchen.yml index 6bf414c21f..9f5df5a03e 100644 --- a/.kitchen.yml +++ b/.kitchen.yml @@ -29,13 +29,15 @@ platforms: - name: local suites: - - name: "deploy_service" - driver: - root_module_directory: test/fixtures/deploy_service - verifier: - systems: - - name: deploy_service - backend: local +# Disabled due to issue #274 +# (https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/274) +# - name: "deploy_service" +# driver: +# root_module_directory: test/fixtures/deploy_service +# verifier: +# systems: +# - name: deploy_service +# backend: local - name: "disable_client_cert" driver: root_module_directory: test/fixtures/disable_client_cert @@ -43,13 +45,15 @@ suites: systems: - name: disable_client_cert backend: local - - name: "node_pool" - driver: - root_module_directory: test/fixtures/node_pool - verifier: - systems: - - name: node_pool - backend: local +# Disabled due to issue #274 +# (https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/274) +# - name: "node_pool" +# driver: +# root_module_directory: test/fixtures/node_pool +# verifier: +# systems: +# - name: node_pool +# backend: local - name: "shared_vpc" driver: root_module_directory: test/fixtures/shared_vpc @@ -98,12 +102,14 @@ suites: systems: - name: stub_domains backend: local - - name: stub_domains_private - driver: - root_module_directory: test/fixtures/stub_domains_private - systems: - - name: stub_domains_private - backend: local +# Disabled due to issue #264 +# (https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/264) +# - name: stub_domains_private +# driver: +# root_module_directory: test/fixtures/stub_domains_private +# systems: +# - name: stub_domains_private +# backend: local - name: "upstream_nameservers" driver: root_module_directory: test/fixtures/upstream_nameservers diff --git a/.ruby-version b/.ruby-version deleted file mode 100644 index aedc15bb0c..0000000000 --- a/.ruby-version +++ /dev/null @@ -1 +0,0 @@ -2.5.3 diff --git a/CHANGELOG.md b/CHANGELOG.md index ec280dddc3..41c9c6f947 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,13 +7,34 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 Extending the adopted spec, each change should have a link to its corresponding pull request appended. ## [Unreleased] -### Added + +### Changed * Made `region` variable optional for zonal clusters [#247] + +## [v5.0.0] - 2019-09-25 +v5.0.0 is a backwards-incompatible release. Please see the [upgrading guide](./docs/upgrading_to_v5.0.md). + +The v5.0.0 module requires using the [2.12 version](https://github.com/terraform-providers/terraform-provider-google/blob/master/CHANGELOG.md#2120-august-01-2019) of the Google provider. 
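For reference, a provider pin that satisfies this requirement might look like the following sketch (`~> 2.12` allows any 2.x release at or above 2.12; the provider block lives in the calling configuration, not in the module itself):

```hcl
# Sketch: pin the Google provider to the range the v5.0.0 module expects.
provider "google" {
  version = "~> 2.12"
}
```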
+ +### Changed + +* **Breaking**: Enabled metadata-concealment by default [#248] +* All beta functionality removed from non-beta clusters, moved `node_pools_taints` to beta modules [#228] + +### Added +* Added support for resource usage export config [#238] +* Added `sandbox_enabled` variable to use GKE Sandbox [#241] + * Added `grant_registry_access` variable to grant Container Registry access to created SA [#236] * Support for Intranode Visibility (IV) and Vertical Pod Autoscaling (VPA) beta features [#216] * Support for Workload Identity beta feature [#234] * Support for Google Groups based RBAC beta feature [#217] +* Support for disabling node pool autoscaling by setting `autoscaling` to `false` within the node pool variable. [#250] + +### Fixed + +* Fixed issue with passing a dynamically created Service Account to the module. [#27] ## [v4.1.0] 2019-07-24 ### Added * Supported version of Terraform is 0.12. [#177] ## [v3.0.0] - 2019-07-08 +v3.0.0 is a breaking release. Refer to the +[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. ### Added @@ -72,6 +95,8 @@ Extending the adopted spec, each change should have a link to its corresponding 2.3. [#148] ## [v2.0.0] - 2019-04-12 +v2.0.0 is a breaking release. Refer to the +[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. ### Added @@ -103,6 +128,10 @@ Extending the adopted spec, each change should have a link to its corresponding * Fix empty zone list. [#132] ## [v1.0.0] - 2019-03-25 +Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. + +In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. + ### Added * Allow creation of service accounts. [#80] * Add support for private clusters via submodule. [#69] @@ -157,7 +186,8 @@ Extending the adopted spec, each change should have a link to its corresponding * Initial release of module. 
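As a sketch of the autoscaling toggle added in [#250], setting `autoscaling` to `false` on a node pool entry makes the module manage a fixed `node_count` (taken from `min_count`) instead of emitting an autoscaling block; the pool name and sizes below are hypothetical:

```hcl
module "gke" {
  source  = "terraform-google-modules/kubernetes-engine/google"
  version = "~> 5.0"
  # ... other required arguments (project_id, name, network, etc.) ...

  node_pools = [
    {
      name        = "fixed-pool" # hypothetical pool name
      autoscaling = false        # skip the autoscaling block entirely
      min_count   = 3            # reused as the fixed node_count when autoscaling is false
    },
  ]
}
```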
-[Unreleased]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v4.1.0...HEAD +[Unreleased]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v5.0.0...HEAD +[v5.0.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v4.1.0...v5.0.0 [v4.1.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v4.0.0...v4.1.0 [v4.0.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v3.0.0...v4.0.0 [v3.0.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v2.1.0...v3.0.0 @@ -171,10 +201,16 @@ Extending the adopted spec, each change should have a link to its corresponding [v0.3.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.2.0...v0.3.0 [v0.2.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.1.0...v0.2.0 +[#248]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/248 [#247]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/247 +[#228]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/228 +[#238]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/238 +[#241]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/241 +[#250]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/250 [#236]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/236 [#217]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/217 [#234]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/234 +[#27]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/27 [#216]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/216 [#214]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/214 [#210]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/210 @@ -229,3 +265,9 @@ Extending the adopted spec, each change should have a link to its corresponding [#15]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/15 [#10]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/10 [#9]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/9 + +[upgrading-to-v2.0]: docs/upgrading_to_v2.0.md +[upgrading-to-v3.0]: docs/upgrading_to_v3.0.md +[terraform-provider-google]: https://github.com/terraform-providers/terraform-provider-google +[3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 +[terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..cd4943578a --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,122 @@ +# Contributing + +This document provides guidelines for contributing to the module. 
+ +## Dependencies + +The following dependencies must be installed on the development system: + +- [Docker Engine][docker-engine] +- [Google Cloud SDK][google-cloud-sdk] +- [make] + +## Generating Documentation for Inputs and Outputs + +The Inputs and Outputs tables in the READMEs of the root module, +submodules, and example modules are automatically generated based on +the `variables` and `outputs` of the respective modules. These tables +must be refreshed if the module interfaces are changed. + +## Templating + +To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (e.g. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. + +The root module is generated by running `make generate`. Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. + +Note: The correct sequence to update the repo using autogen functionality is to run +`make docker_generate && make docker_generate_docs`. This will create the various Terraform files, and then +generate the Terraform documentation using `terraform-docs`. + +### Autogeneration of documentation from .tf files +To generate new Inputs and Outputs tables, run +``` +make docker_generate_docs +``` + +## Integration Testing + +Integration tests are used to verify the behaviour of the root module, +submodules, and example modules. Additions, changes, and fixes should +be accompanied with tests. + +The integration tests are run using [Kitchen][kitchen], +[Kitchen-Terraform][kitchen-terraform], and [InSpec][inspec]. These +tools are packaged within a Docker image for convenience. + +The general strategy for these tests is to verify the behaviour of the +[example modules](./examples/), thus ensuring that the root module, +submodules, and example modules are all functionally correct. + +Six test-kitchen instances are defined: + +- `deploy-service` +- `node-pool` +- `shared-vpc` +- `simple-regional` +- `simple-zonal` +- `stub-domains` + +The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. + +### Test Environment +The easiest way to test the module is in an isolated test project. The setup for such a project is defined in the [test/setup](./test/setup/) directory. + +To use this setup, you need a service account with Project Creator access on a folder. Export the Service Account credentials to your environment like so: + +``` +export SERVICE_ACCOUNT_JSON=$(< credentials.json) +``` + +You will also need to set a few environment variables (a tfvars sketch of these variables follows the execution steps below): +``` +export TF_VAR_org_id="your_org_id" +export TF_VAR_folder_id="your_folder_id" +export TF_VAR_billing_account="your_billing_account_id" +``` + +With these settings in place, you can prepare a test project using Docker: +``` +make docker_test_prepare +``` + +### Noninteractive Execution + +Run `make docker_test_integration` to test all of the example modules +noninteractively, using the prepared test project. + +### Interactive Execution + +1. Run `make docker_run` to start the testing Docker container in + interactive mode. + +1. Run `kitchen_do create <EXAMPLE_NAME>` to initialize the working + directory for an example module. + +1. Run `kitchen_do converge <EXAMPLE_NAME>` to apply the example module. + +1. Run `kitchen_do verify <EXAMPLE_NAME>` to test the example module. + +1. Run `kitchen_do destroy <EXAMPLE_NAME>` to destroy the example module + state. 
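For reference, the `TF_VAR_*` exports in the Test Environment section map one-to-one onto Terraform variables read by the `test/setup` configuration, so an equivalent (hypothetical, placeholder-valued) `terraform.tfvars` would be:

```hcl
# Placeholder values; substitute your own organization, folder, and billing account.
org_id          = "123456789012"
folder_id       = "234567890123"
billing_account = "AAAAAA-BBBBBB-CCCCCC"
```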
+ +## Linting and Formatting + +Many of the files in the repository can be linted or formatted to +maintain a standard of quality. + +### Execution + +Run `make docker_test_lint`. + +[docker-engine]: https://www.docker.com/products/docker-engine +[flake8]: http://flake8.pycqa.org/en/latest/ +[gofmt]: https://golang.org/cmd/gofmt/ +[google-cloud-sdk]: https://cloud.google.com/sdk/install +[hadolint]: https://github.com/hadolint/hadolint +[inspec]: https://inspec.io/ +[kitchen-terraform]: https://github.com/newcontext-oss/kitchen-terraform +[kitchen]: https://kitchen.ci/ +[make]: https://en.wikipedia.org/wiki/Make_(software) +[shellcheck]: https://www.shellcheck.net/ +[terraform-docs]: https://github.com/segmentio/terraform-docs +[terraform]: https://terraform.io/ diff --git a/Makefile b/Makefile index 21d7a2764f..5039822a75 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,172 +12,85 @@ # See the License for the specific language governing permissions and # limitations under the License. +# Please note that this file was generated from [terraform-google-module-template](https://github.com/terraform-google-modules/terraform-google-module-template). +# Please make sure to contribute relevant changes upstream! + # Make will use bash instead of sh SHELL := /usr/bin/env bash -# Docker build config variables -CREDENTIALS_PATH ?= /cft/workdir/credentials.json -DOCKER_ORG := gcr.io/cloud-foundation-cicd -DOCKER_TAG_BASE_KITCHEN_TERRAFORM ?= 2.3.0 -DOCKER_REPO_BASE_KITCHEN_TERRAFORM := ${DOCKER_ORG}/cft/kitchen-terraform:${DOCKER_TAG_BASE_KITCHEN_TERRAFORM} - -# All is the first target in the file so it will get picked up when you just run 'make' on its own -.PHONY: all -all: check generate_docs - -.PHONY: check -check: check_shell check_python check_golang check_terraform check_base_files test_check_headers check_headers check_trailing_whitespace check_generate check_generate_docs - -# The .PHONY directive tells make that this isn't a real target and so -# the presence of a file named 'check_shell' won't cause this target to stop -# working -.PHONY: check_shell -check_shell: - @source test/make.sh && check_shell - -.PHONY: check_python -check_python: - @source test/make.sh && check_python - -.PHONY: check_golang -check_golang: - @source test/make.sh && golang - -.PHONY: check_terraform -check_terraform: - @source test/make.sh && check_terraform - -.PHONY: check_base_files -check_base_files: - @source test/make.sh && basefiles +DOCKER_TAG_VERSION_DEVELOPER_TOOLS := 0.1.0 +DOCKER_IMAGE_DEVELOPER_TOOLS := cft/developer-tools +REGISTRY_URL := gcr.io/cloud-foundation-cicd -.PHONY: check_shebangs -check_shebangs: - @source test/make.sh && check_bash - -.PHONY: check_trailing_whitespace -check_trailing_whitespace: - @source test/make.sh && check_trailing_whitespace - -.PHONY: test_check_headers -test_check_headers: - @echo "Testing the validity of the header check" - @python test/test_verify_boilerplate.py - -.PHONY: check_headers -check_headers: - @echo "Checking file headers" - @python test/verify_boilerplate.py - -.PHONY: check_generate -check_generate: ## Check that `make generate` does not generate a diff - @source test/make.sh && check_generate - -.PHONY: check_generate_docs -check_generate_docs: ## Check that `make generate_docs` does not generate a diff - @source test/make.sh && check_generate_docs 
- -# Integration tests -.PHONY: test_integration -test_integration: - test/ci_integration.sh - -.PHONY: generate_docs -generate_docs: - @source test/make.sh && generate_docs - -.PHONY: generate -generate: - @source test/make.sh && generate - -.PHONY: dev -dev: generate generate_docs - @echo "Updated files" - -# Versioning -.PHONY: version -version: - @source helpers/version-repo.sh - -# Run docker +# Enter docker container for local development .PHONY: docker_run docker_run: docker run --rm -it \ - -e COMPUTE_ENGINE_SERVICE_ACCOUNT \ - -e PROJECT_ID \ - -e REGION \ - -e ZONES \ -e SERVICE_ACCOUNT_JSON \ - -e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \ - -e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \ - -v "$(CURDIR)":/cft/workdir \ - ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ - /bin/bash -c "source test/ci_integration.sh && setup_environment && exec /bin/bash" + -v $(CURDIR):/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /bin/bash -.PHONY: docker_create -docker_create: docker_build_kitchen_terraform +# Execute prepare tests within the docker container +.PHONY: docker_test_prepare +docker_test_prepare: docker run --rm -it \ - -e COMPUTE_ENGINE_SERVICE_ACCOUNT \ - -e PROJECT_ID \ - -e REGION \ - -e ZONES \ -e SERVICE_ACCOUNT_JSON \ - -e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \ - -e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \ - -v "$(CURDIR)":/cft/workdir \ - ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ - /bin/bash -c "source test/ci_integration.sh && setup_environment && kitchen create" - -.PHONY: docker_converge -docker_converge: + -e TF_VAR_org_id \ + -e TF_VAR_folder_id \ + -e TF_VAR_billing_account \ + -v $(CURDIR):/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /usr/local/bin/execute_with_credentials.sh prepare_environment + +# Clean up test environment within the docker container +.PHONY: docker_test_cleanup +docker_test_cleanup: docker run --rm -it \ - -e COMPUTE_ENGINE_SERVICE_ACCOUNT \ - -e PROJECT_ID \ - -e REGION \ - -e ZONES \ -e SERVICE_ACCOUNT_JSON \ - -e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \ - -e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \ - -v "$(CURDIR)":/cft/workdir \ - ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ - /bin/bash -c "source test/ci_integration.sh && setup_environment && kitchen converge && kitchen converge" - -.PHONY: docker_verify -docker_verify: + -e TF_VAR_org_id \ + -e TF_VAR_folder_id \ + -e TF_VAR_billing_account \ + -v $(CURDIR):/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /usr/local/bin/execute_with_credentials.sh cleanup_environment + +# Execute integration tests within the docker container +.PHONY: docker_test_integration +docker_test_integration: docker run --rm -it \ - -e COMPUTE_ENGINE_SERVICE_ACCOUNT \ - -e PROJECT_ID \ - -e REGION \ - -e ZONES \ -e SERVICE_ACCOUNT_JSON \ - -e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \ - -e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \ - -v "$(CURDIR)":/cft/workdir \ - ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ - /bin/bash -c "source test/ci_integration.sh && setup_environment && kitchen verify" + -v $(CURDIR):/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /usr/local/bin/test_integration.sh -.PHONY: docker_destroy -docker_destroy: +# Execute lint tests within the docker container +.PHONY: 
docker_test_lint +docker_test_lint: docker run --rm -it \ - -e COMPUTE_ENGINE_SERVICE_ACCOUNT \ - -e PROJECT_ID \ - -e REGION \ - -e ZONES \ - -e SERVICE_ACCOUNT_JSON \ - -e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \ - -e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \ - -v "$(CURDIR)":/cft/workdir \ - ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ - /bin/bash -c "source test/ci_integration.sh && setup_environment && kitchen destroy" + -v $(CURDIR):/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /usr/local/bin/test_lint.sh -.PHONY: test_integration_docker -test_integration_docker: +# Generate documentation +.PHONY: docker_generate_docs +docker_generate_docs: docker run --rm -it \ - -e COMPUTE_ENGINE_SERVICE_ACCOUNT \ - -e PROJECT_ID \ - -e REGION \ - -e ZONES \ - -e SERVICE_ACCOUNT_JSON \ - -v "$(CURDIR)":/cft/workdir \ - ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ - /bin/bash -c "test/ci_integration.sh" + -v $(CURDIR):/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /bin/bash -c 'source /usr/local/bin/task_helper_functions.sh && generate_docs' + +# Generate files from autogen +.PHONY: docker_generate +docker_generate: + docker run --rm -it \ + -v $(CURDIR):/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /bin/bash -c 'source /usr/local/bin/task_helper_functions.sh && generate' + +# Alias for backwards compatibility +.PHONY: generate_docs +generate_docs: docker_generate_docs + +.PHONY: generate +generate: docker_generate diff --git a/README.md b/README.md index 4f92ede67e..706bb2df83 100644 --- a/README.md +++ b/README.md @@ -108,22 +108,6 @@ Then perform the following commands on the root folder: - `terraform apply` to apply the infrastructure build - `terraform destroy` to destroy the built infrastructure -## Upgrade to v3.0.0 - -v3.0.0 is a breaking release. Refer to the -[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. - -## Upgrade to v2.0.0 - -v2.0.0 is a breaking release. Refer to the -[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. - -## Upgrade to v1.0.0 - -Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. - -In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. 
- -## Inputs @@ -162,7 +146,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | | node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | | node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no | -| node\_pools\_taints | Map of lists containing node taints by node-pool name | object | `` | no | | node\_version | The Kubernetes version of the node pools. Defaults to the kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empty or set the same as master at cluster creation. | string | `""` | no | | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | @@ -249,141 +232,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. -## Templating - -To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. - -The root module is generated by running `make generate`. Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. - -Note: The correct sequence to update the repo using autogen functionality is to run -`make generate && make generate_docs`. This will create the various Terraform files, and then -generate the Terraform documentation using `terraform-docs`. - -## Testing - -### Requirements -- [bundler](https://github.com/bundler/bundler) -- [gcloud](https://cloud.google.com/sdk/install) -- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.6.0 - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Integration test - -Integration tests are run though [test-kitchen](https://github.com/test-kitchen/test-kitchen), [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform), and [InSpec](https://github.com/inspec/inspec). - -Six test-kitchen instances are defined: - -- `deploy-service` -- `node-pool` -- `shared-vpc` -- `simple-regional` -- `simple-zonal` -- `stub-domains` - -The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. - -#### Setup - -1. Configure the [test fixtures](#test-configuration) -2. Download a Service Account key with the necessary permissions and put it in the module's root directory with the name `credentials.json`. - - Requires the [permissions to run the module](#configure-a-service-account) - - Requires `roles/compute.networkAdmin` to create the test suite's networks - - Requires `roles/resourcemanager.projectIamAdmin` since service account creation is tested -3. Build the Docker container for testing: - - ``` - make docker_build_kitchen_terraform - ``` -4. Run the testing container in interactive mode: - - ``` - make docker_run - ``` - - The module root directory will be loaded into the Docker container at `/cft/workdir/`. -5. Run kitchen-terraform to test the infrastructure: - - 1. 
`kitchen create` creates Terraform state and downloads modules, if applicable. - 2. `kitchen converge` creates the underlying resources. Run `kitchen converge ` to create resources for a specific test case. - 3. Run `kitchen converge` again. This is necessary due to an oddity in how `networkPolicyConfig` is handled by the upstream API. (See [#72](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/72) for details). - 4. `kitchen verify` tests the created infrastructure. Run `kitchen verify ` to run a specific test case. - 5. `kitchen destroy` tears down the underlying resources created by `kitchen converge`. Run `kitchen destroy ` to tear down resources for a specific test case. - -Alternatively, you can simply run `make test_integration_docker` to run all the test steps non-interactively. - -If you wish to parallelize running the test suites, it is also possible to offload the work onto Concourse to run each test suite for you using the command `make test_integration_concourse`. The `.concourse` directory will be created and contain all of the logs from the running test suites. - -When running tests locally, you will need to use your own test project environment. You can configure your environment by setting all of the following variables: - -``` -export COMPUTE_ENGINE_SERVICE_ACCOUNT="" -export PROJECT_ID="" -export REGION="" -export ZONES='[""]' -export SERVICE_ACCOUNT_JSON="$(cat "")" -export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" -export GOOGLE_APPLICATION_CREDENTIALS="" -``` - -#### Test configuration - -Each test-kitchen instance is configured with a `variables.tfvars` file in the test fixture directory, e.g. `test/fixtures/node_pool/terraform.tfvars`. -For convenience, since all of the variables are project-specific, these files have been symlinked to `test/fixtures/shared/terraform.tfvars`. -Similarly, each test fixture has a `variables.tf` to define these variables, and an `outputs.tf` to facilitate providing necessary information for `inspec` to locate and query against created resources. - -Each test-kitchen instance creates a GCP Network and Subnetwork fixture to house resources, and may create any other necessary fixture data as needed. - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Linting -The makefile in this project will lint or sometimes just format any shell, -Python, golang, Terraform, or Dockerfiles. The linters will only be run if -the makefile finds files with the appropriate file extension. - -All of the linter checks are in the default make target, so you just have to -run - -``` -make -s -``` - -The -s is for 'silent'. Successful output looks like this - -``` -Running shellcheck -Running flake8 -Running go fmt and go vet -Running terraform validate -Running hadolint on Dockerfiles -Checking for required files -Testing the validity of the header check -.. ----------------------------------------------------------------------- -Ran 2 tests in 0.026s - -OK -Checking file headers -The following lines have trailing whitespace -``` - -The linters -are as follows: -* Shell - shellcheck. Can be found in homebrew -* Python - flake8. Can be installed with 'pip install flake8' -* Golang - gofmt. gofmt comes with the standard golang installation. golang -is a compiled language so there is no standard linter. -* Terraform - terraform has a built-in linter in the 'terraform validate' -command. -* Dockerfiles - hadolint. 
Can be found in homebrew [upgrading-to-v2.0]: docs/upgrading_to_v2.0.md [upgrading-to-v3.0]: docs/upgrading_to_v3.0.md diff --git a/autogen/README.md b/autogen/README.md index 5b44ad2f14..421e4a2605 100644 --- a/autogen/README.md +++ b/autogen/README.md @@ -28,7 +28,7 @@ There are multiple examples included in the [examples](./examples/) folder but s ```hcl module "gke" { - source = "terraform-google-modules/kubernetes-engine/google{% if private_cluster %}//modules/private-cluster{% endif %}" + source = "terraform-google-modules/kubernetes-engine/google{{ module_path }}" project_id = "<PROJECT ID>" name = "gke-test-1" region = "us-central1" @@ -139,6 +139,102 @@ Version 1.0.0 of this module introduces a breaking change: adding the `disable-l In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| authenticator\_security\_group | The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com | string | `"null"` | no | +| basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no | +| basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no | +| cloudrun | (Beta) Enable CloudRun addon | string | `"false"` | no | +| cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. | string | `""` | no | +| cluster\_resource\_labels | The GCE resource labels (a map of key/value pairs) to be applied to the cluster | map(string) | `` | no | +| configure\_ip\_masq | Enables the installation of ip masquerading, which is usually no longer required when using aliased IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no | +| create\_service\_account | Defines if service account specified to run nodes should be created. | bool | `"true"` | no | +| database\_encryption | Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key. | object | `` | no | +| default\_max\_pods\_per\_node | The maximum number of pods to schedule per node | string | `"110"` | no | +| deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no | +| description | The description of the cluster | string | `""` | no | +| disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no | +| enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | +| enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. 
This makes same-node pod-to-pod traffic visible to the VPC network | bool | `"false"` | no | +| enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | +| enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | +| enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | +| grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | +| horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | +| http\_load\_balancing | Enable HTTP load balancer addon | bool | `"true"` | no | +| identity\_namespace | Workload Identity namespace | string | `""` | no | +| initial\_node\_count | The number of nodes to create in this cluster's default node pool. | number | `"0"` | no | +| ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | bool | `"false"` | no | +| ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no | +| ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | +| ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | +| issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no | +| istio | (Beta) Enable Istio addon | string | `"false"` | no | +| kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | +| kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | +| logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | +| maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | +| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists). | object | `` | no | +| master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | string | `"10.0.0.0/28"` | no | +| monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. 
VM metrics will be collected by Google Compute Engine regardless of this setting. Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | +| name | The name of the cluster (required) | string | n/a | yes | +| network | The VPC network to host the cluster in (required) | string | n/a | yes | +| network\_policy | Enable network policy addon | bool | `"false"` | no | +| network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | +| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | +| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | +| node\_pools | List of maps containing node pools | list(map(string)) | `` | no | +| node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | +| node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | +| node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | +| node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no | +| node\_pools\_taints | Map of lists containing node taints by node-pool name | object | `` | no | +| node\_version | The Kubernetes version of the node pools. Defaults to the kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empty or set the same as master at cluster creation. | string | `""` | no | +| non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | +| pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | +| project\_id | The project ID to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (required) | string | n/a | yes | +| regional | Whether this is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | +| resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | +| sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | +| service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. 
| string | `""` | no | +| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | +| subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | +| upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | +| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list(string) | `` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| ca\_certificate | Cluster ca certificate (base64 encoded) | +| cloudrun\_enabled | Whether CloudRun enabled | +| endpoint | Cluster endpoint | +| horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | +| http\_load\_balancing\_enabled | Whether http load balancing enabled | +| intranode\_visibility\_enabled | Whether intra-node visibility is enabled | +| istio\_enabled | Whether Istio is enabled | +| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | +| location | Cluster location (region if regional cluster, zone if zonal cluster) | +| logging\_service | Logging service used | +| master\_authorized\_networks\_config | Networks from which access to master is permitted | +| master\_version | Current master kubernetes version | +| min\_master\_version | Minimum master kubernetes version | +| monitoring\_service | Monitoring service used | +| name | Cluster name | +| network\_policy\_enabled | Whether network policy enabled | +| node\_pools\_names | List of node pools names | +| node\_pools\_versions | List of node pools versions | +| pod\_security\_policy\_enabled | Whether pod security policy is enabled | +| region | Cluster region | +| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| type | Cluster type (regional / zonal) | +| vertical\_pod\_autoscaling\_enabled | Whether veritical pod autoscaling is enabled | +| zones | List of zones in which the cluster resides | + ## Requirements @@ -157,7 +253,7 @@ The [project factory](https://github.com/terraform-google-modules/terraform-goog - [kubectl](https://github.com/kubernetes/kubernetes/releases) 1.9.x #### Terraform and Plugins - [Terraform](https://www.terraform.io/downloads.html) 0.12 -{% if private_cluster or beta_cluster %} +{% if beta_cluster %} - [Terraform Provider for GCP Beta][terraform-provider-google-beta] v2.9 {% else %} - [Terraform Provider for GCP][terraform-provider-google] v2.9 @@ -193,141 +289,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. -## Templating - -To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. - -The root module is generated by running `make generate`. Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. - -Note: The correct sequence to update the repo using autogen functionality is to run -`make generate && make generate_docs`. This will create the various Terraform files, and then -generate the Terraform documentation using `terraform-docs`. 
- -## Testing - -### Requirements -- [bundler](https://github.com/bundler/bundler) -- [gcloud](https://cloud.google.com/sdk/install) -- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.6.0 - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Integration test - -Integration tests are run though [test-kitchen](https://github.com/test-kitchen/test-kitchen), [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform), and [InSpec](https://github.com/inspec/inspec). - -Six test-kitchen instances are defined: - -- `deploy-service` -- `node-pool` -- `shared-vpc` -- `simple-regional` -- `simple-zonal` -- `stub-domains` - -The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. - -#### Setup - -1. Configure the [test fixtures](#test-configuration) -2. Download a Service Account key with the necessary permissions and put it in the module's root directory with the name `credentials.json`. - - Requires the [permissions to run the module](#configure-a-service-account) - - Requires `roles/compute.networkAdmin` to create the test suite's networks - - Requires `roles/resourcemanager.projectIamAdmin` since service account creation is tested -3. Build the Docker container for testing: - - ``` - make docker_build_kitchen_terraform - ``` -4. Run the testing container in interactive mode: - - ``` - make docker_run - ``` - - The module root directory will be loaded into the Docker container at `/cft/workdir/`. -5. Run kitchen-terraform to test the infrastructure: - - 1. `kitchen create` creates Terraform state and downloads modules, if applicable. - 2. `kitchen converge` creates the underlying resources. Run `kitchen converge ` to create resources for a specific test case. - 3. Run `kitchen converge` again. This is necessary due to an oddity in how `networkPolicyConfig` is handled by the upstream API. (See [#72](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/72) for details). - 4. `kitchen verify` tests the created infrastructure. Run `kitchen verify ` to run a specific test case. - 5. `kitchen destroy` tears down the underlying resources created by `kitchen converge`. Run `kitchen destroy ` to tear down resources for a specific test case. - -Alternatively, you can simply run `make test_integration_docker` to run all the test steps non-interactively. - -If you wish to parallelize running the test suites, it is also possible to offload the work onto Concourse to run each test suite for you using the command `make test_integration_concourse`. The `.concourse` directory will be created and contain all of the logs from the running test suites. - -When running tests locally, you will need to use your own test project environment. You can configure your environment by setting all of the following variables: - -``` -export COMPUTE_ENGINE_SERVICE_ACCOUNT="" -export PROJECT_ID="" -export REGION="" -export ZONES='[""]' -export SERVICE_ACCOUNT_JSON="$(cat "")" -export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" -export GOOGLE_APPLICATION_CREDENTIALS="" -``` - -#### Test configuration - -Each test-kitchen instance is configured with a `variables.tfvars` file in the test fixture directory, e.g. `test/fixtures/node_pool/terraform.tfvars`. -For convenience, since all of the variables are project-specific, these files have been symlinked to `test/fixtures/shared/terraform.tfvars`. 
-Similarly, each test fixture has a `variables.tf` to define these variables, and an `outputs.tf` to facilitate providing necessary information for `inspec` to locate and query against created resources. - -Each test-kitchen instance creates a GCP Network and Subnetwork fixture to house resources, and may create any other necessary fixture data as needed. - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Linting -The makefile in this project will lint or sometimes just format any shell, -Python, golang, Terraform, or Dockerfiles. The linters will only be run if -the makefile finds files with the appropriate file extension. - -All of the linter checks are in the default make target, so you just have to -run - -``` -make -s -``` - -The -s is for 'silent'. Successful output looks like this - -``` -Running shellcheck -Running flake8 -Running go fmt and go vet -Running terraform validate -Running hadolint on Dockerfiles -Checking for required files -Testing the validity of the header check -.. ----------------------------------------------------------------------- -Ran 2 tests in 0.026s - -OK -Checking file headers -The following lines have trailing whitespace -``` - -The linters -are as follows: -* Shell - shellcheck. Can be found in homebrew -* Python - flake8. Can be installed with 'pip install flake8' -* Golang - gofmt. gofmt comes with the standard golang installation. golang -is a compiled language so there is no standard linter. -* Terraform - terraform has a built-in linter in the 'terraform validate' -command. -* Dockerfiles - hadolint. Can be found in homebrew {% if private_cluster %} [upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md @@ -339,7 +300,7 @@ command. {% else %} [upgrading-to-v3.0]: docs/upgrading_to_v3.0.md {% endif %} -{% if private_cluster or beta_cluster %} +{% if beta_cluster %} [terraform-provider-google-beta]: https://github.com/terraform-providers/terraform-provider-google-beta {% else %} [terraform-provider-google]: https://github.com/terraform-providers/terraform-provider-google diff --git a/autogen/auth.tf b/autogen/auth.tf index 21275cd41e..a23689bb7b 100644 --- a/autogen/auth.tf +++ b/autogen/auth.tf @@ -20,7 +20,7 @@ Retrieve authentication token *****************************************/ data "google_client_config" "default" { - {% if private_cluster or beta_cluster %} + {% if beta_cluster %} provider = google-beta {% else %} provider = google diff --git a/autogen/cluster.tf b/autogen/cluster.tf index 664de67855..4e5fd74d55 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -20,7 +20,7 @@ Create Container Cluster *****************************************/ resource "google_container_cluster" "primary" { - {% if private_cluster or beta_cluster %} + {% if beta_cluster %} provider = google-beta {% else %} provider = google @@ -67,6 +67,15 @@ resource "google_container_cluster" "primary" { } } + dynamic "resource_usage_export_config" { + for_each = var.resource_usage_export_dataset_id != "" ? 
[var.resource_usage_export_dataset_id] : [] + content { + enable_network_egress_metering = true + bigquery_destination { + dataset_id = resource_usage_export_config.value + } + } + } {% endif %} dynamic "master_authorized_networks_config" { for_each = var.master_authorized_networks_config @@ -134,7 +143,7 @@ resource "google_container_cluster" "primary" { } lifecycle { - ignore_changes = [node_pool] + ignore_changes = [node_pool, initial_node_count] } timeouts { @@ -158,6 +167,14 @@ resource "google_container_cluster" "primary" { node_metadata = workload_metadata_config.value.node_metadata } } + + dynamic "sandbox_config" { + for_each = local.cluster_sandbox_enabled + + content { + sandbox_type = sandbox_config.value + } + } {% endif %} } } @@ -203,7 +220,11 @@ resource "google_container_cluster" "primary" { Create Container Cluster node pools *****************************************/ resource "google_container_node_pool" "pools" { + {% if beta_cluster %} provider = google-beta + {% else %} + provider = google + {% endif %} count = length(var.node_pools) name = var.node_pools[count.index]["name"] project = var.project_id @@ -223,9 +244,14 @@ resource "google_container_node_pool" "pools" { max_pods_per_node = lookup(var.node_pools[count.index], "max_pods_per_node", null) {% endif %} - autoscaling { - min_node_count = lookup(var.node_pools[count.index], "min_count", 1) - max_node_count = lookup(var.node_pools[count.index], "max_count", 100) + node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1) + + dynamic "autoscaling" { + for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } } management { @@ -259,6 +285,7 @@ resource "google_container_node_pool" "pools" { "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints }, ) + {% if beta_cluster %} dynamic "taint" { for_each = concat( var.node_pools_taints["all"], @@ -270,6 +297,7 @@ resource "google_container_node_pool" "pools" { value = taint.value.value } } + {% endif %} tags = concat( ["gke-${var.name}"], ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], diff --git a/autogen/main.tf b/autogen/main.tf index fea7249e75..4483119017 100644 --- a/autogen/main.tf +++ b/autogen/main.tf @@ -20,7 +20,7 @@ Get available zones in region *****************************************/ data "google_compute_zones" "available" { - {% if private_cluster or beta_cluster %} + {% if beta_cluster %} provider = google-beta {% else %} provider = google @@ -76,6 +76,8 @@ locals { security_group = var.authenticator_security_group }] + cluster_sandbox_enabled = var.sandbox_enabled ? ["gvisor"] : [] + {% endif %} cluster_output_name = google_container_cluster.primary.name @@ -103,10 +105,10 @@ locals { {% if beta_cluster %} # BETA features - cluster_output_istio_enabled = google_container_cluster.primary.addons_config.0.istio_config.0.disabled - cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config.0.enabled + cluster_output_istio_disabled = google_container_cluster.primary.addons_config.0.istio_config != null && length(google_container_cluster.primary.addons_config.0.istio_config) == 1 ? 
google_container_cluster.primary.addons_config.0.istio_config.0.disabled : false + cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config != null && length(google_container_cluster.primary.pod_security_policy_config) == 1 ? google_container_cluster.primary.pod_security_policy_config.0.enabled : false cluster_output_intranode_visbility_enabled = google_container_cluster.primary.enable_intranode_visibility - cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling.0.enabled + cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling != null && length(google_container_cluster.primary.vertical_pod_autoscaling) == 1 ? google_container_cluster.primary.vertical_pod_autoscaling.0.enabled : false # /BETA features {% endif %} @@ -136,7 +138,7 @@ locals { cluster_kubernetes_dashboard_enabled = ! local.cluster_output_kubernetes_dashboard_enabled {% if beta_cluster %} # BETA features - cluster_istio_enabled = ! local.cluster_output_istio_enabled + cluster_istio_enabled = ! local.cluster_output_istio_disabled cluster_cloudrun_enabled = var.cloudrun cluster_pod_security_policy_enabled = local.cluster_output_pod_security_policy_enabled cluster_intranode_visibility_enabled = local.cluster_output_intranode_visbility_enabled diff --git a/autogen/networks.tf b/autogen/networks.tf index 84baaa8995..cff6762fa3 100644 --- a/autogen/networks.tf +++ b/autogen/networks.tf @@ -17,7 +17,7 @@ {{ autogeneration_note }} data "google_compute_network" "gke_network" { - {% if private_cluster or beta_cluster %} + {% if beta_cluster %} provider = google-beta {% else %} provider = google @@ -28,7 +28,7 @@ data "google_compute_network" "gke_network" { } data "google_compute_subnetwork" "gke_subnetwork" { - {% if private_cluster or beta_cluster %} + {% if beta_cluster %} provider = google-beta {% else %} provider = google diff --git a/autogen/variables.tf b/autogen/variables.tf index 46f34d3074..8fd25a7fc8 100644 --- a/autogen/variables.tf +++ b/autogen/variables.tf @@ -179,6 +179,7 @@ variable "node_pools_metadata" { } } +{% if beta_cluster %} variable "node_pools_taints" { type = map(list(object({key=string,value=string,effect=string}))) description = "Map of lists containing node taints by node-pool name" @@ -189,6 +190,7 @@ variable "node_pools_taints" { } } +{% endif %} variable "node_pools_tags" { type = map(list(string)) description = "Map of lists containing node network tags by node-pool name" @@ -367,9 +369,22 @@ variable "pod_security_policy_config" { }] } +variable "resource_usage_export_dataset_id" { + type = string + description = "The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic." + default = "" +} + variable "node_metadata" { description = "Specifies how node metadata is exposed to the workload running on the node" - default = "UNSPECIFIED" + default = "SECURE" + type = string +} + +variable "sandbox_enabled" { + type = bool + description = "(Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it)." 
+ default = false } variable "enable_intranode_visibility" { @@ -378,7 +393,7 @@ variable "enable_intranode_visibility" { default = false } - variable "enable_vertical_pod_autoscaling" { +variable "enable_vertical_pod_autoscaling" { type = bool description = "Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it" default = false diff --git a/build/int.cloudbuild.yaml b/build/int.cloudbuild.yaml new file mode 100644 index 0000000000..85139efe7d --- /dev/null +++ b/build/int.cloudbuild.yaml @@ -0,0 +1,41 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +timeout: 12600s +steps: +- id: prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && prepare_environment'] + env: + - 'TF_VAR_org_id=$_ORG_ID' + - 'TF_VAR_folder_id=$_FOLDER_ID' + - 'TF_VAR_billing_account=$_BILLING_ACCOUNT' +- id: create + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create'] +- id: converge + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge'] +- id: verify + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify'] +- id: destroy + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy'] +tags: +- 'ci' +- 'integration' +substitutions: + _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.1.0' diff --git a/build/lint.cloudbuild.yaml b/build/lint.cloudbuild.yaml new file mode 100644 index 0000000000..3b7306297c --- /dev/null +++ b/build/lint.cloudbuild.yaml @@ -0,0 +1,27 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+steps:
+- id: 'lint-generation'
+  name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
+  args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && check_generate']
+- id: 'lint-tests'
+  name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
+  args: ['/usr/local/bin/test_lint.sh']
+tags:
+- 'ci'
+- 'lint'
+substitutions:
+  _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools'
+  _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.1.0'
diff --git a/cluster.tf b/cluster.tf
index 499e689a4b..ffdb27b0fc 100644
--- a/cluster.tf
+++ b/cluster.tf
@@ -99,7 +99,7 @@ resource "google_container_cluster" "primary" {
   }
 
   lifecycle {
-    ignore_changes = [node_pool]
+    ignore_changes = [node_pool, initial_node_count]
   }
 
   timeouts {
@@ -125,7 +125,7 @@ resource "google_container_cluster" "primary" {
 Create Container Cluster node pools
 *****************************************/
 resource "google_container_node_pool" "pools" {
-  provider = google-beta
+  provider = google
   count    = length(var.node_pools)
   name     = var.node_pools[count.index]["name"]
   project  = var.project_id
@@ -142,9 +142,14 @@ resource "google_container_node_pool" "pools" {
     lookup(var.node_pools[count.index], "min_count", 1),
   )
 
-  autoscaling {
-    min_node_count = lookup(var.node_pools[count.index], "min_count", 1)
-    max_node_count = lookup(var.node_pools[count.index], "max_count", 100)
+  node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1)
+
+  dynamic "autoscaling" {
+    for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : []
+    content {
+      min_node_count = lookup(autoscaling.value, "min_count", 1)
+      max_node_count = lookup(autoscaling.value, "max_count", 100)
+    }
   }
 
   management {
@@ -178,17 +183,6 @@ resource "google_container_node_pool" "pools" {
       "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints
     },
   )
-  dynamic "taint" {
-    for_each = concat(
-      var.node_pools_taints["all"],
-      var.node_pools_taints[var.node_pools[count.index]["name"]],
-    )
-    content {
-      effect = taint.value.effect
-      key    = taint.value.key
-      value  = taint.value.value
-    }
-  }
   tags = concat(
     ["gke-${var.name}"],
     ["gke-${var.name}-${var.node_pools[count.index]["name"]}"],
diff --git a/docs/upgrading_to_v5.0.md b/docs/upgrading_to_v5.0.md
new file mode 100644
index 0000000000..39abfbe8a5
--- /dev/null
+++ b/docs/upgrading_to_v5.0.md
@@ -0,0 +1,82 @@
+# Upgrading to v5.0
+
+The v5.0 release of *kubernetes-engine* is a backwards-incompatible release.
+
+## Migration Instructions
+
+### Node pool taints
+
+Previously, node pool taints could be set on all module versions.
+
+Now, taints can only be set on the beta versions of the module.
+
+```diff
+ module "kubernetes_engine_private_cluster" {
+-  source  = "terraform-google-modules/kubernetes-engine/google"
++  source  = "terraform-google-modules/kubernetes-engine/google//modules/beta-private-cluster"
+-  version = "~> 4.0"
++  version = "~> 5.0"
+ }
+```
+
+### Service Account creation
+
+Previously, explicitly specifying a Service Account with the `service_account` variable was sufficient to force that Service Account to be used.
+
+Now, an additional `create_service_account` variable has been added, with a default value of `true`. If you would like to use an explicitly created Service Account from outside the module, you will need to set `create_service_account` to `false` (in addition to passing in the Service Account email).
+
+No action is needed if you use the module's default service account.
+
+```diff
+ module "kubernetes_engine_private_cluster" {
+   source  = "terraform-google-modules/kubernetes-engine/google"
+-  version = "~> 4.0"
++  version = "~> 5.0"
+
+   service_account        = "project-service-account@my-project.iam.gserviceaccount.com"
++  create_service_account = false
+   # ...
+ }
+```
+
+### Resource simplification
+
+Previously, the `google_container_cluster` and `google_container_node_pool` resources were defined separately for regional and zonal clusters. They have now been collapsed into a single resource apiece, with the `location` argument determining whether a cluster is regional or zonal.
+
+If you are using regional clusters, no migration is needed. If you are using zonal clusters, a state migration is needed. You can use the provided [script](../helpers/migrate.py) to determine the required state changes:
+
+1. Download the script:
+
+   ```sh
+   curl -O https://raw.githubusercontent.com/terraform-google-modules/terraform-google-kubernetes-engine/v5.0.0/helpers/migrate.py
+   chmod +x migrate.py
+   ```
+
+2. Run the script in dry-run mode to confirm the expected changes:
+
+   ```sh
+   $ ./migrate.py --dryrun
+
+   ---- Migrating the following modules:
+   -- module.gke-cluster-dev.module.gke
+   ---- Commands to run:
+   terraform state mv -state terraform.tfstate "module.gke-cluster-dev.module.gke.google_container_cluster.zonal_primary[0]" "module.gke-cluster-dev.module.gke.google_container_cluster.primary[0]"
+   terraform state mv "module.gke-cluster-dev.module.gke.google_container_node_pool.zonal_pools[0]" "module.gke-cluster-dev.module.gke.google_container_node_pool.pools[0]"
+   ```
+
+3. Execute the migration script:
+
+   ```sh
+   $ ./migrate.py
+
+   ---- Migrating the following modules:
+   -- module.gke-cluster-dev.module.gke
+   ---- Commands to run:
+   Move "module.gke-cluster-dev.module.gke.google_container_cluster.zonal_primary[0]" to "module.gke-cluster-dev.module.gke.google_container_cluster.primary[0]"
+   Successfully moved 1 object(s).
+   Move "module.gke-cluster-dev.module.gke.google_container_node_pool.zonal_pools[0]" to "module.gke-cluster-dev.module.gke.google_container_node_pool.pools[0]"
+   Successfully moved 1 object(s).
+   Move "module.gke-cluster-dev.module.gke.null_resource.wait_for_zonal_cluster" to "module.gke-cluster-dev.module.gke.null_resource.wait_for_cluster"
+   Successfully moved 1 object(s).
+   ```
+
+4. Run `terraform plan` to confirm no changes are expected.
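+
+### Putting it together
+
+For reference, here is a minimal sketch of a post-upgrade module call that combines the changes above (beta module source, external Service Account, and taints). All project, network, and IP-range names below are placeholders; substitute your own values.
+
+```hcl
+module "kubernetes_engine_private_cluster" {
+  source  = "terraform-google-modules/kubernetes-engine/google//modules/beta-private-cluster"
+  version = "~> 5.0"
+
+  # Placeholder values -- replace with your own.
+  project_id        = "my-project"
+  name              = "example-private-cluster"
+  region            = "us-central1"
+  network           = "my-network"
+  subnetwork        = "my-subnet"
+  ip_range_pods     = "my-pods-range"
+  ip_range_services = "my-services-range"
+
+  # v5.0: an externally managed Service Account must be paired with
+  # create_service_account = false.
+  service_account        = "project-service-account@my-project.iam.gserviceaccount.com"
+  create_service_account = false
+
+  # v5.0: taints are only available on the beta modules. Keys are
+  # node-pool names; "all" applies to every pool.
+  node_pools_taints = {
+    all = []
+
+    "default-node-pool" = [
+      {
+        key    = "default-node-pool"
+        value  = "true"
+        effect = "PREFER_NO_SCHEDULE"
+      },
+    ]
+  }
+}
+```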
diff --git a/examples/deploy_service/main.tf b/examples/deploy_service/main.tf index 10bce771ef..3e15164913 100644 --- a/examples/deploy_service/main.tf +++ b/examples/deploy_service/main.tf @@ -23,11 +23,6 @@ provider "google" { region = var.region } -provider "google-beta" { - version = "~> 2.12.0" - region = var.region -} - provider "kubernetes" { load_config_file = false host = "https://${module.gke.endpoint}" diff --git a/examples/disable_client_cert/main.tf b/examples/disable_client_cert/main.tf index c64f09fd67..221eb994ce 100644 --- a/examples/disable_client_cert/main.tf +++ b/examples/disable_client_cert/main.tf @@ -23,11 +23,6 @@ provider "google" { region = var.region } -provider "google-beta" { - version = "~> 2.12.0" - region = var.region -} - module "gke" { source = "../../" diff --git a/examples/node_pool/main.tf b/examples/node_pool/main.tf index 2fb447fbb3..6662bb84ac 100644 --- a/examples/node_pool/main.tf +++ b/examples/node_pool/main.tf @@ -18,18 +18,13 @@ locals { cluster_type = "node-pool" } -provider "google" { - version = "~> 2.12.0" - region = var.region -} - provider "google-beta" { version = "~> 2.12.0" region = var.region } module "gke" { - source = "../../" + source = "../../modules/beta-public-cluster/" project_id = var.project_id name = "${local.cluster_type}-cluster${var.cluster_name_suffix}" regional = false diff --git a/examples/shared_vpc/main.tf b/examples/shared_vpc/main.tf index a0221ca360..766239ead8 100644 --- a/examples/shared_vpc/main.tf +++ b/examples/shared_vpc/main.tf @@ -23,11 +23,6 @@ provider "google" { region = var.region } -provider "google-beta" { - version = "~> 2.12.0" - region = var.region -} - module "gke" { source = "../../" project_id = var.project_id diff --git a/examples/simple_regional/main.tf b/examples/simple_regional/main.tf index bd42f43d1a..4662435fbd 100644 --- a/examples/simple_regional/main.tf +++ b/examples/simple_regional/main.tf @@ -23,11 +23,6 @@ provider "google" { region = var.region } -provider "google-beta" { - version = "~> 2.12.0" - region = var.region -} - module "gke" { source = "../../" project_id = var.project_id diff --git a/examples/simple_regional_beta/main.tf b/examples/simple_regional_beta/main.tf index 9eaf2b6117..fc95090ede 100644 --- a/examples/simple_regional_beta/main.tf +++ b/examples/simple_regional_beta/main.tf @@ -18,12 +18,6 @@ locals { cluster_type = "simple-regional-beta" } -provider "google" { - version = "~> 2.12.0" - credentials = file(var.credentials_path) - region = var.region -} - provider "google-beta" { version = "~> 2.12.0" credentials = file(var.credentials_path) diff --git a/examples/simple_regional_private/main.tf b/examples/simple_regional_private/main.tf index b79c21c770..89568e86ee 100644 --- a/examples/simple_regional_private/main.tf +++ b/examples/simple_regional_private/main.tf @@ -18,7 +18,7 @@ locals { cluster_type = "simple-regional-private" } -provider "google-beta" { +provider "google" { version = "~> 2.12.0" region = var.region } diff --git a/examples/simple_zonal/main.tf b/examples/simple_zonal/main.tf index 7e04d7e4fe..edd90f7a0d 100644 --- a/examples/simple_zonal/main.tf +++ b/examples/simple_zonal/main.tf @@ -23,11 +23,6 @@ provider "google" { region = var.region } -provider "google-beta" { - version = "~> 2.12.0" - region = var.region -} - module "gke" { source = "../../" project_id = var.project_id diff --git a/examples/simple_zonal_private/main.tf b/examples/simple_zonal_private/main.tf index 2192787516..428218d76a 100644 --- 
a/examples/simple_zonal_private/main.tf +++ b/examples/simple_zonal_private/main.tf @@ -18,7 +18,7 @@ locals { cluster_type = "simple-regional-private" } -provider "google-beta" { +provider "google" { version = "~> 2.12.0" region = var.region } diff --git a/examples/stub_domains/main.tf b/examples/stub_domains/main.tf index 4227aac952..37264f781b 100644 --- a/examples/stub_domains/main.tf +++ b/examples/stub_domains/main.tf @@ -23,11 +23,6 @@ provider "google" { region = var.region } -provider "google-beta" { - version = "~> 2.12.0" - region = var.region -} - module "gke" { source = "../../" project_id = var.project_id diff --git a/examples/stub_domains_private/main.tf b/examples/stub_domains_private/main.tf index 046f9838c0..65bc48247d 100644 --- a/examples/stub_domains_private/main.tf +++ b/examples/stub_domains_private/main.tf @@ -14,7 +14,7 @@ * limitations under the License. */ -provider "google-beta" { +provider "google" { version = "~> 2.12.0" region = var.region } diff --git a/examples/stub_domains_upstream_nameservers/main.tf b/examples/stub_domains_upstream_nameservers/main.tf index 42f3967d5a..0da83b95c6 100644 --- a/examples/stub_domains_upstream_nameservers/main.tf +++ b/examples/stub_domains_upstream_nameservers/main.tf @@ -23,11 +23,6 @@ provider "google" { region = var.region } -provider "google-beta" { - version = "~> 2.12.0" - region = var.region -} - module "gke" { source = "../../" project_id = var.project_id diff --git a/examples/upstream_nameservers/main.tf b/examples/upstream_nameservers/main.tf index 8a997e8c7a..ecded7c29c 100644 --- a/examples/upstream_nameservers/main.tf +++ b/examples/upstream_nameservers/main.tf @@ -23,11 +23,6 @@ provider "google" { region = var.region } -provider "google-beta" { - version = "~> 2.12.0" - region = var.region -} - module "gke" { source = "../../" project_id = var.project_id diff --git a/helpers/generate_modules/generate_modules.py b/helpers/generate_modules/generate_modules.py index f6beb84832..c235e7ad65 100755 --- a/helpers/generate_modules/generate_modules.py +++ b/helpers/generate_modules/generate_modules.py @@ -46,13 +46,16 @@ def template_options(self, base): 'private_cluster': False, }), Module("./modules/private-cluster", { + 'module_path': '//modules/private-cluster', 'private_cluster': True }), Module("./modules/beta-private-cluster", { + 'module_path': '//modules/beta-private-cluster', 'private_cluster': True, 'beta_cluster': True, }), Module("./modules/beta-public-cluster", { + 'module_path': '//modules/beta-public-cluster', 'private_cluster': False, 'beta_cluster': True, }), diff --git a/helpers/migrate.py b/helpers/migrate.py new file mode 100755 index 0000000000..8f2d71cfce --- /dev/null +++ b/helpers/migrate.py @@ -0,0 +1,318 @@ +#!/usr/bin/env python3 + +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
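+
+# Usage (mirroring the examples in docs/upgrading_to_v5.0.md):
+#
+#   ./migrate.py --dryrun   # print the planned `terraform state mv` commands
+#   ./migrate.py            # execute the state moves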
+
+import argparse
+import copy
+import subprocess
+import sys
+import re
+
+MIGRATIONS = [
+    {
+        "resource_type": "google_container_cluster",
+        "name": "zonal_primary",
+        "rename": "primary",
+        "module": ""
+    },
+    {
+        "resource_type": "google_container_node_pool",
+        "name": "zonal_pools",
+        "rename": "pools",
+        "module": ""
+    },
+    {
+        "resource_type": "null_resource",
+        "name": "wait_for_zonal_cluster",
+        "rename": "wait_for_cluster",
+        "module": "",
+        "plural": False
+    },
+]
+
+
+class ModuleMigration:
+    """
+    Migrate the resources of a zonal GKE module to the unified resource
+    names used since the regional/zonal resource merge in v5.0.
+    """
+
+    def __init__(self, source_module):
+        self.source_module = source_module
+
+    def moves(self):
+        """
+        Generate the set of old/new resource path pairs that will be
+        migrated.
+        """
+        resources = self.targets()
+        moves = []
+        for (old, migration) in resources:
+            new = copy.deepcopy(old)
+            new.module += migration["module"]
+
+            # Update the copied resource with the "rename" value if it is set
+            if "rename" in migration:
+                new.name = migration["rename"]
+
+            # `plural` controls whether path() appends the [index] suffix.
+            old.plural = migration.get("plural", True)
+            new.plural = migration.get("plural", True)
+
+            pair = (old.path(), new.path())
+            moves.append(pair)
+        return moves
+
+    def targets(self):
+        """
+        A list of (resource, migration) pairs that will be moved.
+        """
+        to_move = []
+
+        for migration in MIGRATIONS:
+            resource_type = migration["resource_type"]
+            resource_name = migration["name"]
+            matching_resources = self.source_module.get_resources(
+                resource_type,
+                resource_name)
+            to_move += [(r, migration) for r in matching_resources]
+
+        return to_move
+
+
+class TerraformModule:
+    """
+    A Terraform module with associated resources.
+    """
+
+    def __init__(self, name, resources):
+        """
+        Create a new module and associate it with a list of resources.
+        """
+        self.name = name
+        self.resources = resources
+
+    def get_resources(self, resource_type=None, resource_name=None):
+        """
+        Return a list of resources matching the given resource type and name.
+        """
+
+        ret = []
+        for resource in self.resources:
+            matches_type = (resource_type is None or
+                            resource_type == resource.resource_type)
+
+            name_pattern = re.compile(r'%s(\[\d+\])?' % resource_name)
+            matches_name = (resource_name is None or
+                            name_pattern.match(resource.name))
+
+            if matches_type and matches_name:
+                ret.append(resource)
+
+        return ret
+
+    def has_resource(self, resource_type=None, resource_name=None):
+        """
+        Does this module contain a resource with the matching type and name?
+        """
+        for resource in self.resources:
+            matches_type = (resource_type is None or
+                            resource_type == resource.resource_type)
+
+            matches_name = (resource_name is None or
+                            resource_name in resource.name)
+
+            if matches_type and matches_name:
+                return True
+
+        return False
+
+    def __repr__(self):
+        return "{}({!r}, {!r})".format(
+            self.__class__.__name__,
+            self.name,
+            [repr(resource) for resource in self.resources])
+
+
+class TerraformResource:
+    """
+    A Terraform resource, defined by the identifier of that resource.
+    """
+
+    @classmethod
+    def from_path(cls, path):
+        """
+        Generate a new Terraform resource, based on the fully qualified
+        Terraform resource path.
+        """
+        if re.match(r'\A[\w.\[\]-]+\Z', path) is None:
+            raise ValueError(
+                "Invalid Terraform resource path {!r}".format(path))
+
+        parts = path.split(".")
+        name = parts.pop()
+        resource_type = parts.pop()
+        module = ".".join(parts)
+        return cls(module, resource_type, name)
+
+    def __init__(self, module, resource_type, name):
+        """
+        Create a new TerraformResource from a pre-parsed path.
+        """
+        self.module = module
+        self.resource_type = resource_type
+
+        # Split a trailing [N] suffix off the resource name, if present.
+        find_suffix = re.match(r'(^.+)\[(\d+)\]', name)
+        if find_suffix:
+            self.name = find_suffix.group(1)
+            self.index = find_suffix.group(2)
+        else:
+            self.name = name
+            self.index = -1
+
+    def path(self):
+        """
+        Return the fully qualified resource path.
+        """
+        parts = [self.module, self.resource_type, self.name]
+        if parts[0] == '':
+            del parts[0]
+        path = ".".join(parts)
+        if self.index != -1 and self.plural:
+            path = "{0}[{1}]".format(path, self.index)
+        return path
+
+    def __repr__(self):
+        return "{}({!r}, {!r}, {!r})".format(
+            self.__class__.__name__,
+            self.module,
+            self.resource_type,
+            self.name)
+
+
+def group_by_module(resources):
+    """
+    Group a set of resources according to their containing module.
+    """
+
+    groups = {}
+    for resource in resources:
+        if resource.module in groups:
+            groups[resource.module].append(resource)
+        else:
+            groups[resource.module] = [resource]
+
+    return [
+        TerraformModule(name, contained)
+        for name, contained in groups.items()
+    ]
+
+
+def read_state(statefile=None):
+    """
+    List the resource paths in the Terraform state via
+    `terraform state list`.
+    """
+    argv = ["terraform", "state", "list"]
+    result = subprocess.run(argv,
+                            capture_output=True,
+                            check=True,
+                            encoding='utf-8')
+    elements = result.stdout.split("\n")
+    # Drop the trailing empty element produced by the final newline.
+    elements.pop()
+    return elements
+
+
+def state_changes_for_module(module, statefile=None):
+    """
+    Compute the Terraform state moves for a single module.
+    """
+    commands = []
+
+    migration = ModuleMigration(module)
+
+    for (old, new) in migration.moves():
+        wrapper = '"{0}"'
+        argv = ["terraform",
+                "state",
+                "mv",
+                wrapper.format(old),
+                wrapper.format(new)]
+        commands.append(argv)
+
+    return commands
+
+
+def migrate(statefile=None, dryrun=False):
+    """
+    Migrate the terraform state in `statefile` to match the post-refactor
+    resource structure.
+    """
+
+    # Generate a list of Terraform resource states from the output of
+    # `terraform state list`
+    resources = [
+        TerraformResource.from_path(path)
+        for path in read_state(statefile)
+    ]
+
+    # Group resources based on the module where they're defined.
+    modules = group_by_module(resources)
+
+    # Filter our list of Terraform modules down to anything that looks like a
+    # zonal GKE module. We key this off the presence of
+    # `google_container_cluster.zonal_primary` since that should almost always
+    # be unique to a GKE module.
+ modules_to_migrate = [ + module for module in modules + if module.has_resource("google_container_cluster", "zonal_primary") + ] + + print("---- Migrating the following modules:") + for module in modules_to_migrate: + print("-- " + module.name) + + # Collect a list of resources for each module + commands = [] + for module in modules_to_migrate: + commands += state_changes_for_module(module, statefile) + + print("---- Commands to run:") + for argv in commands: + if dryrun: + print(" ".join(argv)) + else: + argv = [arg.strip('"') for arg in argv] + subprocess.run(argv, check=True, encoding='utf-8') + + +def main(argv): + parser = argparser() + args = parser.parse_args(argv[1:]) + + # print("cp {} {}".format(args.oldstate, args.newstate)) + # shutil.copy(args.oldstate, args.newstate) + + migrate(dryrun=args.dryrun) + + +def argparser(): + parser = argparse.ArgumentParser(description='Migrate Terraform state') + parser.add_argument('--dryrun', action='store_true', + help='Print the `terraform state mv` commands instead ' + 'of running the commands.') + return parser + + +if __name__ == "__main__": + main(sys.argv) diff --git a/helpers/terraform_docs b/helpers/terraform_docs deleted file mode 100755 index c33230959b..0000000000 --- a/helpers/terraform_docs +++ /dev/null @@ -1,694 +0,0 @@ -#!/usr/bin/env bash - -set -e - -main() { - declare argv - argv=$(getopt -o a: --long args: -- "$@") || return - eval "set -- $argv" - - declare args - declare files - - for argv; do - case $argv in - (-a|--args) - shift - args="$1" - shift - ;; - (--) - shift - files="$@" - break - ;; - esac - done - - local hack_terraform_docs=$(terraform version | head -1 | grep -c 0.12) - - if [[ "$hack_terraform_docs" == "1" ]]; then - which awk 2>&1 >/dev/null || ( echo "awk is required for terraform-docs hack to work with Terraform 0.12"; exit 1) - - tmp_file_awk=$(mktemp "${TMPDIR:-/tmp}/terraform-docs-XXXXXXXXXX") - terraform_docs_awk "$tmp_file_awk" - terraform_docs "$tmp_file_awk" "$args" "$files" - rm -f "$tmp_file_awk" - else - terraform_docs "0" "$args" "$files" - fi - -} - -terraform_docs() { - readonly terraform_docs_awk_file="$1" - readonly args="$2" - readonly files="$3" - - declare -a paths - declare -a tfvars_files - - index=0 - - for file_with_path in $files; do - file_with_path="${file_with_path// /__REPLACED__SPACE__}" - - paths[index]=$(dirname "$file_with_path") - - if [[ "$file_with_path" == *".tfvars" ]]; then - tfvars_files+=("$file_with_path") - fi - - ((index+=1)) - done - - readonly tmp_file=$(mktemp) - readonly text_file="README.md" - - for path_uniq in $(echo "${paths[*]}" | tr ' ' '\n' | sort -u); do - path_uniq="${path_uniq//__REPLACED__SPACE__/ }" - - pushd "$path_uniq" > /dev/null - - if [[ ! 
-f "$text_file" ]]; then - popd > /dev/null - continue - fi - - if [[ "$terraform_docs_awk_file" == "0" ]]; then - terraform-docs $args md ./ > "$tmp_file" - else - # Can't append extension for mktemp, so renaming instead - tmp_file_docs=$(mktemp "${TMPDIR:-/tmp}/terraform-docs-XXXXXXXXXX") - mv "$tmp_file_docs" "$tmp_file_docs.tf" - tmp_file_docs_tf="$tmp_file_docs.tf" - - awk -f "$terraform_docs_awk_file" ./*.tf > "$tmp_file_docs_tf" - terraform-docs $args md "$tmp_file_docs_tf" > "$tmp_file" - rm -f "$tmp_file_docs_tf" - fi - - # Replace content between markers with the placeholder - https://stackoverflow.com/questions/1212799/how-do-i-extract-lines-between-two-line-delimiters-in-perl#1212834 - perl -i -ne 'if (/BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK/../END OF PRE-COMMIT-TERRAFORM DOCS HOOK/) { print $_ if /BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK/; print "I_WANT_TO_BE_REPLACED\n$_" if /END OF PRE-COMMIT-TERRAFORM DOCS HOOK/;} else { print $_ }' "$text_file" - - # Replace placeholder with the content of the file - perl -i -e 'open(F, "'"$tmp_file"'"); $f = join "", ; while(<>){if (/I_WANT_TO_BE_REPLACED/) {print $f} else {print $_};}' "$text_file" - - rm -f "$tmp_file" - - popd > /dev/null - done -} - -terraform_docs_awk() { - readonly output_file=$1 - - cat <<"EOF" > $output_file -# This script converts Terraform 0.12 variables/outputs to something suitable for `terraform-docs` -# As of terraform-docs v0.6.0, HCL2 is not supported. This script is a *dirty hack* to get around it. -# https://github.com/segmentio/terraform-docs/ -# https://github.com/segmentio/terraform-docs/issues/62 - -# Script was originally found here: https://github.com/cloudposse/build-harness/blob/master/bin/terraform-docs.awk - -{ - if ( $0 ~ /\{/ ) { - braceCnt++ - } - - if ( $0 ~ /\}/ ) { - braceCnt-- - } - - # [START] variable or output block started - if ($0 ~ /^[[:space:]]*(variable|output)[[:space:]][[:space:]]*"(.*?)"/) { - # Normalize the braceCnt (should be 1 now) - braceCnt = 1 - # [CLOSE] "default" block - if (blockDefCnt > 0) { - blockDefCnt = 0 - } - blockCnt++ - print $0 - } - - # [START] multiline default statement started - if (blockCnt > 0) { - if ($0 ~ /^[[:space:]][[:space:]]*(default)[[:space:]][[:space:]]*=/) { - if ($3 ~ "null") { - print " default = \"null\"" - } else { - print $0 - blockDefCnt++ - blockDefStart=1 - } - } - } - - # [PRINT] single line "description" - if (blockCnt > 0) { - if (blockDefCnt == 0) { - if ($0 ~ /^[[:space:]][[:space:]]*description[[:space:]][[:space:]]*=/) { - # [CLOSE] "default" block - if (blockDefCnt > 0) { - blockDefCnt = 0 - } - print $0 - } - } - } - - # [PRINT] single line "type" - if (blockCnt > 0) { - if ($0 ~ /^[[:space:]][[:space:]]*type[[:space:]][[:space:]]*=/ ) { - # [CLOSE] "default" block - if (blockDefCnt > 0) { - blockDefCnt = 0 - } - type=$3 - if (type ~ "object") { - print " type = \"object\"" - } else { - # legacy quoted types: "string", "list", and "map" - if ($3 ~ /^[[:space:]]*"(.*?)"[[:space:]]*$/) { - print " type = " $3 - } else { - print " type = \"" $3 "\"" - } - } - } - } - - # [CLOSE] variable/output block - if (blockCnt > 0) { - if (braceCnt == 0 && blockCnt > 0) { - blockCnt-- - print $0 - } - } - - # [PRINT] Multiline "default" statement - if (blockCnt > 0 && blockDefCnt > 0) { - if (blockDefStart == 1) { - blockDefStart = 0 - } else { - print $0 - } - } -} -EOF - -} - -getopt() { - # pure-getopt, a drop-in replacement for GNU getopt in pure Bash. 
- # version 1.4.3 - # - # Copyright 2012-2018 Aron Griffis - # - # Permission is hereby granted, free of charge, to any person obtaining - # a copy of this software and associated documentation files (the - # "Software"), to deal in the Software without restriction, including - # without limitation the rights to use, copy, modify, merge, publish, - # distribute, sublicense, and/or sell copies of the Software, and to - # permit persons to whom the Software is furnished to do so, subject to - # the following conditions: - # - # The above copyright notice and this permission notice shall be included - # in all copies or substantial portions of the Software. - # - # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - _getopt_main() { - # Returns one of the following statuses: - # 0 success - # 1 error parsing parameters - # 2 error in getopt invocation - # 3 internal error - # 4 reserved for -T - # - # For statuses 0 and 1, generates normalized and shell-quoted - # "options -- parameters" on stdout. - - declare parsed status - declare short long name flags - declare have_short=false - - # Synopsis from getopt man-page: - # - # getopt optstring parameters - # getopt [options] [--] optstring parameters - # getopt [options] -o|--options optstring [options] [--] parameters - # - # The first form can be normalized to the third form which - # _getopt_parse() understands. The second form can be recognized after - # first parse when $short hasn't been set. - - if [[ -n ${GETOPT_COMPATIBLE+isset} || $1 == [^-]* ]]; then - # Enable compatibility mode - flags=c$flags - # Normalize first to third synopsis form - set -- -o "$1" -- "${@:2}" - fi - - # First parse always uses flags=p since getopt always parses its own - # arguments effectively in this mode. - parsed=$(_getopt_parse getopt ahl:n:o:qQs:TuV \ - alternative,help,longoptions:,name:,options:,quiet,quiet-output,shell:,test,version \ - p "$@") - status=$? - if [[ $status != 0 ]]; then - if [[ $status == 1 ]]; then - echo "Try \`getopt --help' for more information." >&2 - # Since this is the first parse, convert status 1 to 2 - status=2 - fi - return $status - fi - eval "set -- $parsed" - - while [[ $# -gt 0 ]]; do - case $1 in - (-a|--alternative) - flags=a$flags ;; - - (-h|--help) - _getopt_help - return 2 # as does GNU getopt - ;; - - (-l|--longoptions) - long="$long${long:+,}$2" - shift ;; - - (-n|--name) - name=$2 - shift ;; - - (-o|--options) - short=$2 - have_short=true - shift ;; - - (-q|--quiet) - flags=q$flags ;; - - (-Q|--quiet-output) - flags=Q$flags ;; - - (-s|--shell) - case $2 in - (sh|bash) - flags=${flags//t/} ;; - (csh|tcsh) - flags=t$flags ;; - (*) - echo 'getopt: unknown shell after -s or --shell argument' >&2 - echo "Try \`getopt --help' for more information." >&2 - return 2 ;; - esac - shift ;; - - (-u|--unquoted) - flags=u$flags ;; - - (-T|--test) - return 4 ;; - - (-V|--version) - echo "pure-getopt 1.4.3" - return 0 ;; - - (--) - shift - break ;; - esac - - shift - done - - if ! $have_short; then - # $short was declared but never set, not even to an empty string. 
- # This implies the second form in the synopsis. - if [[ $# == 0 ]]; then - echo 'getopt: missing optstring argument' >&2 - echo "Try \`getopt --help' for more information." >&2 - return 2 - fi - short=$1 - have_short=true - shift - fi - - if [[ $short == -* ]]; then - # Leading dash means generate output in place rather than reordering, - # unless we're already in compatibility mode. - [[ $flags == *c* ]] || flags=i$flags - short=${short#?} - elif [[ $short == +* ]]; then - # Leading plus means POSIXLY_CORRECT, unless we're already in - # compatibility mode. - [[ $flags == *c* ]] || flags=p$flags - short=${short#?} - fi - - # This should fire if POSIXLY_CORRECT is in the environment, even if - # it's an empty string. That's the difference between :+ and + - flags=${POSIXLY_CORRECT+p}$flags - - _getopt_parse "${name:-getopt}" "$short" "$long" "$flags" "$@" - } - - _getopt_parse() { - # Inner getopt parser, used for both first parse and second parse. - # Returns 0 for success, 1 for error parsing, 3 for internal error. - # In the case of status 1, still generates stdout with whatever could - # be parsed. - # - # $flags is a string of characters with the following meanings: - # a - alternative parsing mode - # c - GETOPT_COMPATIBLE - # i - generate output in place rather than reordering - # p - POSIXLY_CORRECT - # q - disable error reporting - # Q - disable normal output - # t - quote for csh/tcsh - # u - unquoted output - - declare name="$1" short="$2" long="$3" flags="$4" - shift 4 - - # Split $long on commas, prepend double-dashes, strip colons; - # for use with _getopt_resolve_abbrev - declare -a longarr - _getopt_split longarr "$long" - longarr=( "${longarr[@]/#/--}" ) - longarr=( "${longarr[@]%:}" ) - longarr=( "${longarr[@]%:}" ) - - # Parse and collect options and parameters - declare -a opts params - declare o alt_recycled=false error=0 - - while [[ $# -gt 0 ]]; do - case $1 in - (--) - params=( "${params[@]}" "${@:2}" ) - break ;; - - (--*=*) - o=${1%%=*} - if ! o=$(_getopt_resolve_abbrev "$o" "${longarr[@]}"); then - error=1 - elif [[ ,"$long", == *,"${o#--}"::,* ]]; then - opts=( "${opts[@]}" "$o" "${1#*=}" ) - elif [[ ,"$long", == *,"${o#--}":,* ]]; then - opts=( "${opts[@]}" "$o" "${1#*=}" ) - elif [[ ,"$long", == *,"${o#--}",* ]]; then - if $alt_recycled; then o=${o#-}; fi - _getopt_err "$name: option '$o' doesn't allow an argument" - error=1 - else - echo "getopt: assertion failed (1)" >&2 - return 3 - fi - alt_recycled=false - ;; - - (--?*) - o=$1 - if ! o=$(_getopt_resolve_abbrev "$o" "${longarr[@]}"); then - error=1 - elif [[ ,"$long", == *,"${o#--}",* ]]; then - opts=( "${opts[@]}" "$o" ) - elif [[ ,"$long", == *,"${o#--}::",* ]]; then - opts=( "${opts[@]}" "$o" '' ) - elif [[ ,"$long", == *,"${o#--}:",* ]]; then - if [[ $# -ge 2 ]]; then - shift - opts=( "${opts[@]}" "$o" "$1" ) - else - if $alt_recycled; then o=${o#-}; fi - _getopt_err "$name: option '$o' requires an argument" - error=1 - fi - else - echo "getopt: assertion failed (2)" >&2 - return 3 - fi - alt_recycled=false - ;; - - (-*) - if [[ $flags == *a* ]]; then - # Alternative parsing mode! - # Try to handle as a long option if any of the following apply: - # 1. There's an equals sign in the mix -x=3 or -xy=3 - # 2. There's 2+ letters and an abbreviated long match -xy - # 3. There's a single letter and an exact long match - # 4. There's a single letter and no short match - o=${1::2} # temp for testing #4 - if [[ $1 == *=* || $1 == -?? 
|| \ - ,$long, == *,"${1#-}"[:,]* || \ - ,$short, != *,"${o#-}"[:,]* ]]; then - o=$(_getopt_resolve_abbrev "${1%%=*}" "${longarr[@]}" 2>/dev/null) - case $? in - (0) - # Unambiguous match. Let the long options parser handle - # it, with a flag to get the right error message. - set -- "-$1" "${@:2}" - alt_recycled=true - continue ;; - (1) - # Ambiguous match, generate error and continue. - _getopt_resolve_abbrev "${1%%=*}" "${longarr[@]}" >/dev/null - error=1 - shift - continue ;; - (2) - # No match, fall through to single-character check. - true ;; - (*) - echo "getopt: assertion failed (3)" >&2 - return 3 ;; - esac - fi - fi - - o=${1::2} - if [[ "$short" == *"${o#-}"::* ]]; then - if [[ ${#1} -gt 2 ]]; then - opts=( "${opts[@]}" "$o" "${1:2}" ) - else - opts=( "${opts[@]}" "$o" '' ) - fi - elif [[ "$short" == *"${o#-}":* ]]; then - if [[ ${#1} -gt 2 ]]; then - opts=( "${opts[@]}" "$o" "${1:2}" ) - elif [[ $# -ge 2 ]]; then - shift - opts=( "${opts[@]}" "$o" "$1" ) - else - _getopt_err "$name: option requires an argument -- '${o#-}'" - error=1 - fi - elif [[ "$short" == *"${o#-}"* ]]; then - opts=( "${opts[@]}" "$o" ) - if [[ ${#1} -gt 2 ]]; then - set -- "$o" "-${1:2}" "${@:2}" - fi - else - if [[ $flags == *a* ]]; then - # Alternative parsing mode! Report on the entire failed - # option. GNU includes =value but we omit it for sanity with - # very long values. - _getopt_err "$name: unrecognized option '${1%%=*}'" - else - _getopt_err "$name: invalid option -- '${o#-}'" - if [[ ${#1} -gt 2 ]]; then - set -- "$o" "-${1:2}" "${@:2}" - fi - fi - error=1 - fi ;; - - (*) - # GNU getopt in-place mode (leading dash on short options) - # overrides POSIXLY_CORRECT - if [[ $flags == *i* ]]; then - opts=( "${opts[@]}" "$1" ) - elif [[ $flags == *p* ]]; then - params=( "${params[@]}" "$@" ) - break - else - params=( "${params[@]}" "$1" ) - fi - esac - - shift - done - - if [[ $flags == *Q* ]]; then - true # generate no output - else - echo -n ' ' - if [[ $flags == *[cu]* ]]; then - printf '%s -- %s' "${opts[*]}" "${params[*]}" - else - if [[ $flags == *t* ]]; then - _getopt_quote_csh "${opts[@]}" -- "${params[@]}" - else - _getopt_quote "${opts[@]}" -- "${params[@]}" - fi - fi - echo - fi - - return $error - } - - _getopt_err() { - if [[ $flags != *q* ]]; then - printf '%s\n' "$1" >&2 - fi - } - - _getopt_resolve_abbrev() { - # Resolves an abbrevation from a list of possibilities. - # If the abbreviation is unambiguous, echoes the expansion on stdout - # and returns 0. If the abbreviation is ambiguous, prints a message on - # stderr and returns 1. (For first parse this should convert to exit - # status 2.) If there is no match at all, prints a message on stderr - # and returns 2. - declare a q="$1" - declare -a matches - shift - for a; do - if [[ $q == "$a" ]]; then - # Exact match. Squash any other partial matches. - matches=( "$a" ) - break - elif [[ $flags == *a* && $q == -[^-]* && $a == -"$q" ]]; then - # Exact alternative match. Squash any other partial matches. - matches=( "$a" ) - break - elif [[ $a == "$q"* ]]; then - # Abbreviated match. - matches=( "${matches[@]}" "$a" ) - elif [[ $flags == *a* && $q == -[^-]* && $a == -"$q"* ]]; then - # Abbreviated alternative match. 
- matches=( "${matches[@]}" "${a#-}" ) - fi - done - case ${#matches[@]} in - (0) - [[ $flags == *q* ]] || \ - printf "$name: unrecognized option %s\\n" >&2 \ - "$(_getopt_quote "$q")" - return 2 ;; - (1) - printf '%s' "${matches[0]}"; return 0 ;; - (*) - [[ $flags == *q* ]] || \ - printf "$name: option %s is ambiguous; possibilities: %s\\n" >&2 \ - "$(_getopt_quote "$q")" "$(_getopt_quote "${matches[@]}")" - return 1 ;; - esac - } - - _getopt_split() { - # Splits $2 at commas to build array specified by $1 - declare IFS=, - eval "$1=( \$2 )" - } - - _getopt_quote() { - # Quotes arguments with single quotes, escaping inner single quotes - declare s space q=\' - for s; do - printf "$space'%s'" "${s//$q/$q\\$q$q}" - space=' ' - done - } - - _getopt_quote_csh() { - # Quotes arguments with single quotes, escaping inner single quotes, - # bangs, backslashes and newlines - declare s i c space - for s; do - echo -n "$space'" - for ((i=0; i<${#s}; i++)); do - c=${s:i:1} - case $c in - (\\|\'|!) - echo -n "'\\$c'" ;; - ($'\n') - echo -n "\\$c" ;; - (*) - echo -n "$c" ;; - esac - done - echo -n \' - space=' ' - done - } - - _getopt_help() { - cat <<-EOT >&2 - - Usage: - getopt - getopt [options] [--] - getopt [options] -o|--options [options] [--] - - Parse command options. - - Options: - -a, --alternative allow long options starting with single - - -l, --longoptions the long options to be recognized - -n, --name the name under which errors are reported - -o, --options the short options to be recognized - -q, --quiet disable error reporting by getopt(3) - -Q, --quiet-output no normal output - -s, --shell set quoting conventions to those of - -T, --test test for getopt(1) version - -u, --unquoted do not quote the output - - -h, --help display this help and exit - -V, --version output version information and exit - - For more details see getopt(1). - EOT - } - - _getopt_version_check() { - if [[ -z $BASH_VERSION ]]; then - echo "getopt: unknown version of bash might not be compatible" >&2 - return 1 - fi - - # This is a lexical comparison that should be sufficient forever. - if [[ $BASH_VERSION < 2.05b ]]; then - echo "getopt: bash $BASH_VERSION might not be compatible" >&2 - return 1 - fi - - return 0 - } - - _getopt_version_check - _getopt_main "$@" - declare status=$? - unset -f _getopt_main _getopt_err _getopt_parse _getopt_quote \ - _getopt_quote_csh _getopt_resolve_abbrev _getopt_split _getopt_help \ - _getopt_version_check - return $status -} - -[[ $BASH_SOURCE != "$0" ]] || main "$@" \ No newline at end of file diff --git a/helpers/terraform_validate b/helpers/terraform_validate deleted file mode 100755 index 0c284194ac..0000000000 --- a/helpers/terraform_validate +++ /dev/null @@ -1,23 +0,0 @@ -#! /bin/bash -# -# Copyright 2019 Google LLC. This software is provided as-is, without warranty -# or representation for any use or purpose. Your use of it is subject to your -# agreement with Google. -# -# This script initializes modules so that terraform validate as of 0.12 behaves -# as expected and does not issue errors such as: -# -# Error: Module not installed -# -# on test/fixtures/shared_vpc_no_subnets/main.tf line 37: -# 37: module "project-factory" { -# -# This module is not yet installed. Run "terraform init" to install all modules -# required by this configuration. - -# The first and only argument to this script is the directory containing *.tf -# files to validate. This directory is assumed to be a root module. 
- -cd "$1" -terraform init -backend=false -terraform validate \ No newline at end of file diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index 4fe5c70dc8..8782de87d2 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -25,7 +25,7 @@ There are multiple examples included in the [examples](./examples/) folder but s ```hcl module "gke" { - source = "terraform-google-modules/kubernetes-engine/google//modules/private-cluster" + source = "terraform-google-modules/kubernetes-engine/google//modules/beta-private-cluster" project_id = "" name = "gke-test-1" region = "us-central1" @@ -177,7 +177,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy | Enable network policy addon | bool | `"false"` | no | | network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | -| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"UNSPECIFIED"` | no | +| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | @@ -191,6 +191,8 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | +| resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | +| sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | @@ -276,141 +278,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. -## Templating - -To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. - -The root module is generated by running `make generate`. 
Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. - -Note: The correct sequence to update the repo using autogen functionality is to run -`make generate && make generate_docs`. This will create the various Terraform files, and then -generate the Terraform documentation using `terraform-docs`. - -## Testing - -### Requirements -- [bundler](https://github.com/bundler/bundler) -- [gcloud](https://cloud.google.com/sdk/install) -- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.6.0 - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Integration test - -Integration tests are run though [test-kitchen](https://github.com/test-kitchen/test-kitchen), [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform), and [InSpec](https://github.com/inspec/inspec). - -Six test-kitchen instances are defined: - -- `deploy-service` -- `node-pool` -- `shared-vpc` -- `simple-regional` -- `simple-zonal` -- `stub-domains` - -The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. - -#### Setup - -1. Configure the [test fixtures](#test-configuration) -2. Download a Service Account key with the necessary permissions and put it in the module's root directory with the name `credentials.json`. - - Requires the [permissions to run the module](#configure-a-service-account) - - Requires `roles/compute.networkAdmin` to create the test suite's networks - - Requires `roles/resourcemanager.projectIamAdmin` since service account creation is tested -3. Build the Docker container for testing: - - ``` - make docker_build_kitchen_terraform - ``` -4. Run the testing container in interactive mode: - - ``` - make docker_run - ``` - - The module root directory will be loaded into the Docker container at `/cft/workdir/`. -5. Run kitchen-terraform to test the infrastructure: - - 1. `kitchen create` creates Terraform state and downloads modules, if applicable. - 2. `kitchen converge` creates the underlying resources. Run `kitchen converge ` to create resources for a specific test case. - 3. Run `kitchen converge` again. This is necessary due to an oddity in how `networkPolicyConfig` is handled by the upstream API. (See [#72](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/72) for details). - 4. `kitchen verify` tests the created infrastructure. Run `kitchen verify ` to run a specific test case. - 5. `kitchen destroy` tears down the underlying resources created by `kitchen converge`. Run `kitchen destroy ` to tear down resources for a specific test case. - -Alternatively, you can simply run `make test_integration_docker` to run all the test steps non-interactively. - -If you wish to parallelize running the test suites, it is also possible to offload the work onto Concourse to run each test suite for you using the command `make test_integration_concourse`. The `.concourse` directory will be created and contain all of the logs from the running test suites. - -When running tests locally, you will need to use your own test project environment. 
You can configure your environment by setting all of the following variables: - -``` -export COMPUTE_ENGINE_SERVICE_ACCOUNT="" -export PROJECT_ID="" -export REGION="" -export ZONES='[""]' -export SERVICE_ACCOUNT_JSON="$(cat "")" -export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" -export GOOGLE_APPLICATION_CREDENTIALS="" -``` - -#### Test configuration - -Each test-kitchen instance is configured with a `variables.tfvars` file in the test fixture directory, e.g. `test/fixtures/node_pool/terraform.tfvars`. -For convenience, since all of the variables are project-specific, these files have been symlinked to `test/fixtures/shared/terraform.tfvars`. -Similarly, each test fixture has a `variables.tf` to define these variables, and an `outputs.tf` to facilitate providing necessary information for `inspec` to locate and query against created resources. - -Each test-kitchen instance creates a GCP Network and Subnetwork fixture to house resources, and may create any other necessary fixture data as needed. - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Linting -The makefile in this project will lint or sometimes just format any shell, -Python, golang, Terraform, or Dockerfiles. The linters will only be run if -the makefile finds files with the appropriate file extension. - -All of the linter checks are in the default make target, so you just have to -run - -``` -make -s -``` - -The -s is for 'silent'. Successful output looks like this - -``` -Running shellcheck -Running flake8 -Running go fmt and go vet -Running terraform validate -Running hadolint on Dockerfiles -Checking for required files -Testing the validity of the header check -.. ----------------------------------------------------------------------- -Ran 2 tests in 0.026s - -OK -Checking file headers -The following lines have trailing whitespace -``` - -The linters -are as follows: -* Shell - shellcheck. Can be found in homebrew -* Python - flake8. Can be installed with 'pip install flake8' -* Golang - gofmt. gofmt comes with the standard golang installation. golang -is a compiled language so there is no standard linter. -* Terraform - terraform has a built-in linter in the 'terraform validate' -command. -* Dockerfiles - hadolint. Can be found in homebrew [upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md [upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index d887d332ed..c481c69a35 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -62,6 +62,15 @@ resource "google_container_cluster" "primary" { } } + dynamic "resource_usage_export_config" { + for_each = var.resource_usage_export_dataset_id != "" ? 
[var.resource_usage_export_dataset_id] : [] + content { + enable_network_egress_metering = true + bigquery_destination { + dataset_id = resource_usage_export_config.value + } + } + } dynamic "master_authorized_networks_config" { for_each = var.master_authorized_networks_config content { @@ -126,7 +135,7 @@ resource "google_container_cluster" "primary" { } lifecycle { - ignore_changes = [node_pool] + ignore_changes = [node_pool, initial_node_count] } timeouts { @@ -149,6 +158,14 @@ resource "google_container_cluster" "primary" { node_metadata = workload_metadata_config.value.node_metadata } } + + dynamic "sandbox_config" { + for_each = local.cluster_sandbox_enabled + + content { + sandbox_type = sandbox_config.value + } + } } } @@ -207,9 +224,14 @@ resource "google_container_node_pool" "pools" { ) max_pods_per_node = lookup(var.node_pools[count.index], "max_pods_per_node", null) - autoscaling { - min_node_count = lookup(var.node_pools[count.index], "min_count", 1) - max_node_count = lookup(var.node_pools[count.index], "max_count", 100) + node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1) + + dynamic "autoscaling" { + for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } } management { diff --git a/modules/beta-private-cluster/main.tf b/modules/beta-private-cluster/main.tf index 41f44b986d..551f865a1b 100644 --- a/modules/beta-private-cluster/main.tf +++ b/modules/beta-private-cluster/main.tf @@ -71,6 +71,8 @@ locals { security_group = var.authenticator_security_group }] + cluster_sandbox_enabled = var.sandbox_enabled ? ["gvisor"] : [] + cluster_output_name = google_container_cluster.primary.name cluster_output_location = google_container_cluster.primary.location @@ -92,10 +94,10 @@ locals { cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled # BETA features - cluster_output_istio_enabled = google_container_cluster.primary.addons_config.0.istio_config.0.disabled - cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config.0.enabled + cluster_output_istio_disabled = google_container_cluster.primary.addons_config.0.istio_config != null && length(google_container_cluster.primary.addons_config.0.istio_config) == 1 ? google_container_cluster.primary.addons_config.0.istio_config.0.disabled : false + cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config != null && length(google_container_cluster.primary.pod_security_policy_config) == 1 ? google_container_cluster.primary.pod_security_policy_config.0.enabled : false cluster_output_intranode_visbility_enabled = google_container_cluster.primary.enable_intranode_visibility - cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling.0.enabled + cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling != null && length(google_container_cluster.primary.vertical_pod_autoscaling) == 1 ? google_container_cluster.primary.vertical_pod_autoscaling.0.enabled : false # /BETA features @@ -123,7 +125,7 @@ locals { cluster_horizontal_pod_autoscaling_enabled = ! 
local.cluster_output_horizontal_pod_autoscaling_enabled cluster_kubernetes_dashboard_enabled = ! local.cluster_output_kubernetes_dashboard_enabled # BETA features - cluster_istio_enabled = ! local.cluster_output_istio_enabled + cluster_istio_enabled = ! local.cluster_output_istio_disabled cluster_cloudrun_enabled = var.cloudrun cluster_pod_security_policy_enabled = local.cluster_output_pod_security_policy_enabled cluster_intranode_visibility_enabled = local.cluster_output_intranode_visbility_enabled diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index 6d1fa393ce..d783248ea2 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -364,9 +364,22 @@ variable "pod_security_policy_config" { }] } +variable "resource_usage_export_dataset_id" { + type = string + description = "The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic." + default = "" +} + variable "node_metadata" { description = "Specifies how node metadata is exposed to the workload running on the node" - default = "UNSPECIFIED" + default = "SECURE" + type = string +} + +variable "sandbox_enabled" { + type = bool + description = "(Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it)." + default = false } variable "enable_intranode_visibility" { diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 75a7e55188..d611d2b787 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -23,7 +23,7 @@ There are multiple examples included in the [examples](./examples/) folder but s ```hcl module "gke" { - source = "terraform-google-modules/kubernetes-engine/google" + source = "terraform-google-modules/kubernetes-engine/google//modules/beta-public-cluster" project_id = "" name = "gke-test-1" region = "us-central1" @@ -168,7 +168,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy | Enable network policy addon | bool | `"false"` | no | | network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | -| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"UNSPECIFIED"` | no | +| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | @@ -182,6 +182,8 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) 
| bool | `"true"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | +| resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | +| sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | @@ -267,141 +269,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. -## Templating - -To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. - -The root module is generated by running `make generate`. Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. - -Note: The correct sequence to update the repo using autogen functionality is to run -`make generate && make generate_docs`. This will create the various Terraform files, and then -generate the Terraform documentation using `terraform-docs`. - -## Testing - -### Requirements -- [bundler](https://github.com/bundler/bundler) -- [gcloud](https://cloud.google.com/sdk/install) -- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.6.0 - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Integration test - -Integration tests are run though [test-kitchen](https://github.com/test-kitchen/test-kitchen), [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform), and [InSpec](https://github.com/inspec/inspec). - -Six test-kitchen instances are defined: - -- `deploy-service` -- `node-pool` -- `shared-vpc` -- `simple-regional` -- `simple-zonal` -- `stub-domains` - -The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. - -#### Setup - -1. Configure the [test fixtures](#test-configuration) -2. Download a Service Account key with the necessary permissions and put it in the module's root directory with the name `credentials.json`. - - Requires the [permissions to run the module](#configure-a-service-account) - - Requires `roles/compute.networkAdmin` to create the test suite's networks - - Requires `roles/resourcemanager.projectIamAdmin` since service account creation is tested -3. Build the Docker container for testing: - - ``` - make docker_build_kitchen_terraform - ``` -4. Run the testing container in interactive mode: - - ``` - make docker_run - ``` - - The module root directory will be loaded into the Docker container at `/cft/workdir/`. -5. Run kitchen-terraform to test the infrastructure: - - 1. 
`kitchen create` creates Terraform state and downloads modules, if applicable. - 2. `kitchen converge` creates the underlying resources. Run `kitchen converge ` to create resources for a specific test case. - 3. Run `kitchen converge` again. This is necessary due to an oddity in how `networkPolicyConfig` is handled by the upstream API. (See [#72](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/72) for details). - 4. `kitchen verify` tests the created infrastructure. Run `kitchen verify ` to run a specific test case. - 5. `kitchen destroy` tears down the underlying resources created by `kitchen converge`. Run `kitchen destroy ` to tear down resources for a specific test case. - -Alternatively, you can simply run `make test_integration_docker` to run all the test steps non-interactively. - -If you wish to parallelize running the test suites, it is also possible to offload the work onto Concourse to run each test suite for you using the command `make test_integration_concourse`. The `.concourse` directory will be created and contain all of the logs from the running test suites. - -When running tests locally, you will need to use your own test project environment. You can configure your environment by setting all of the following variables: - -``` -export COMPUTE_ENGINE_SERVICE_ACCOUNT="" -export PROJECT_ID="" -export REGION="" -export ZONES='[""]' -export SERVICE_ACCOUNT_JSON="$(cat "")" -export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" -export GOOGLE_APPLICATION_CREDENTIALS="" -``` - -#### Test configuration - -Each test-kitchen instance is configured with a `variables.tfvars` file in the test fixture directory, e.g. `test/fixtures/node_pool/terraform.tfvars`. -For convenience, since all of the variables are project-specific, these files have been symlinked to `test/fixtures/shared/terraform.tfvars`. -Similarly, each test fixture has a `variables.tf` to define these variables, and an `outputs.tf` to facilitate providing necessary information for `inspec` to locate and query against created resources. - -Each test-kitchen instance creates a GCP Network and Subnetwork fixture to house resources, and may create any other necessary fixture data as needed. - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Linting -The makefile in this project will lint or sometimes just format any shell, -Python, golang, Terraform, or Dockerfiles. The linters will only be run if -the makefile finds files with the appropriate file extension. - -All of the linter checks are in the default make target, so you just have to -run - -``` -make -s -``` - -The -s is for 'silent'. Successful output looks like this - -``` -Running shellcheck -Running flake8 -Running go fmt and go vet -Running terraform validate -Running hadolint on Dockerfiles -Checking for required files -Testing the validity of the header check -.. ----------------------------------------------------------------------- -Ran 2 tests in 0.026s - -OK -Checking file headers -The following lines have trailing whitespace -``` - -The linters -are as follows: -* Shell - shellcheck. Can be found in homebrew -* Python - flake8. Can be installed with 'pip install flake8' -* Golang - gofmt. gofmt comes with the standard golang installation. golang -is a compiled language so there is no standard linter. -* Terraform - terraform has a built-in linter in the 'terraform validate' -command. -* Dockerfiles - hadolint. 
Can be found in homebrew [upgrading-to-v2.0]: docs/upgrading_to_v2.0.md [upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index c26d85ff50..a264e932b9 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -62,6 +62,15 @@ resource "google_container_cluster" "primary" { } } + dynamic "resource_usage_export_config" { + for_each = var.resource_usage_export_dataset_id != "" ? [var.resource_usage_export_dataset_id] : [] + content { + enable_network_egress_metering = true + bigquery_destination { + dataset_id = resource_usage_export_config.value + } + } + } dynamic "master_authorized_networks_config" { for_each = var.master_authorized_networks_config content { @@ -126,7 +135,7 @@ resource "google_container_cluster" "primary" { } lifecycle { - ignore_changes = [node_pool] + ignore_changes = [node_pool, initial_node_count] } timeouts { @@ -149,6 +158,14 @@ resource "google_container_cluster" "primary" { node_metadata = workload_metadata_config.value.node_metadata } } + + dynamic "sandbox_config" { + for_each = local.cluster_sandbox_enabled + + content { + sandbox_type = sandbox_config.value + } + } } } @@ -202,9 +219,14 @@ resource "google_container_node_pool" "pools" { ) max_pods_per_node = lookup(var.node_pools[count.index], "max_pods_per_node", null) - autoscaling { - min_node_count = lookup(var.node_pools[count.index], "min_count", 1) - max_node_count = lookup(var.node_pools[count.index], "max_count", 100) + node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1) + + dynamic "autoscaling" { + for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } } management { diff --git a/modules/beta-public-cluster/main.tf b/modules/beta-public-cluster/main.tf index e05f4bc726..cd4e6c2c4f 100644 --- a/modules/beta-public-cluster/main.tf +++ b/modules/beta-public-cluster/main.tf @@ -71,6 +71,8 @@ locals { security_group = var.authenticator_security_group }] + cluster_sandbox_enabled = var.sandbox_enabled ? ["gvisor"] : [] + cluster_output_name = google_container_cluster.primary.name cluster_output_location = google_container_cluster.primary.location @@ -92,10 +94,10 @@ locals { cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled # BETA features - cluster_output_istio_enabled = google_container_cluster.primary.addons_config.0.istio_config.0.disabled - cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config.0.enabled + cluster_output_istio_disabled = google_container_cluster.primary.addons_config.0.istio_config != null && length(google_container_cluster.primary.addons_config.0.istio_config) == 1 ? google_container_cluster.primary.addons_config.0.istio_config.0.disabled : false + cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config != null && length(google_container_cluster.primary.pod_security_policy_config) == 1 ? 
google_container_cluster.primary.pod_security_policy_config.0.enabled : false cluster_output_intranode_visbility_enabled = google_container_cluster.primary.enable_intranode_visibility - cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling.0.enabled + cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling != null && length(google_container_cluster.primary.vertical_pod_autoscaling) == 1 ? google_container_cluster.primary.vertical_pod_autoscaling.0.enabled : false # /BETA features @@ -123,7 +125,7 @@ locals { cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_output_horizontal_pod_autoscaling_enabled cluster_kubernetes_dashboard_enabled = ! local.cluster_output_kubernetes_dashboard_enabled # BETA features - cluster_istio_enabled = ! local.cluster_output_istio_enabled + cluster_istio_enabled = ! local.cluster_output_istio_disabled cluster_cloudrun_enabled = var.cloudrun cluster_pod_security_policy_enabled = local.cluster_output_pod_security_policy_enabled cluster_intranode_visibility_enabled = local.cluster_output_intranode_visbility_enabled diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index 7873500f6e..8d20d440e2 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -340,9 +340,22 @@ variable "pod_security_policy_config" { }] } +variable "resource_usage_export_dataset_id" { + type = string + description = "The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic." + default = "" +} + variable "node_metadata" { description = "Specifies how node metadata is exposed to the workload running on the node" - default = "UNSPECIFIED" + default = "SECURE" + type = string +} + +variable "sandbox_enabled" { + type = bool + description = "(Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it)." + default = false } variable "enable_intranode_visibility" { diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index 48ffa5b508..dc0c16513c 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -171,7 +171,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | | node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | | node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no | -| node\_pools\_taints | Map of lists containing node taints by node-pool name | object | `` | no | | node\_version | The Kubernetes version of the node pools. Defaults to the kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empty or set the same as master at cluster creation. | string | `""` | no | | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading.
| list(string) | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | @@ -226,7 +225,7 @@ The [project factory](https://github.com/terraform-google-modules/terraform-goog - [kubectl](https://github.com/kubernetes/kubernetes/releases) 1.9.x #### Terraform and Plugins - [Terraform](https://www.terraform.io/downloads.html) 0.12 -- [Terraform Provider for GCP Beta][terraform-provider-google-beta] v2.9 +- [Terraform Provider for GCP][terraform-provider-google] v2.9 ### Configure a Service Account In order to execute this module you must have a Service Account with the @@ -258,144 +257,9 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. -## Templating - -To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (e.g., [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. - -The root module is generated by running `make generate`. Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. - -Note: The correct sequence to update the repo using autogen functionality is to run -`make generate && make generate_docs`. This will create the various Terraform files, and then -generate the Terraform documentation using `terraform-docs`. - -## Testing - -### Requirements -- [bundler](https://github.com/bundler/bundler) -- [gcloud](https://cloud.google.com/sdk/install) -- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.6.0 - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Integration test - -Integration tests are run through [test-kitchen](https://github.com/test-kitchen/test-kitchen), [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform), and [InSpec](https://github.com/inspec/inspec). - -Six test-kitchen instances are defined: - -- `deploy-service` -- `node-pool` -- `shared-vpc` -- `simple-regional` -- `simple-zonal` -- `stub-domains` - -The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. - -#### Setup - -1. Configure the [test fixtures](#test-configuration) -2. Download a Service Account key with the necessary permissions and put it in the module's root directory with the name `credentials.json`. - - Requires the [permissions to run the module](#configure-a-service-account) - - Requires `roles/compute.networkAdmin` to create the test suite's networks - - Requires `roles/resourcemanager.projectIamAdmin` since service account creation is tested -3. Build the Docker container for testing: - - ``` - make docker_build_kitchen_terraform - ``` -4. Run the testing container in interactive mode: - - ``` - make docker_run - ``` - - The module root directory will be loaded into the Docker container at `/cft/workdir/`. -5. Run kitchen-terraform to test the infrastructure: - - 1. `kitchen create` creates Terraform state and downloads modules, if applicable. - 2. `kitchen converge` creates the underlying resources. Run `kitchen converge ` to create resources for a specific test case. - 3. Run `kitchen converge` again. This is necessary due to an oddity in how `networkPolicyConfig` is handled by the upstream API. (See [#72](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/72) for details). - 4.
`kitchen verify` tests the created infrastructure. Run `kitchen verify ` to run a specific test case. - 5. `kitchen destroy` tears down the underlying resources created by `kitchen converge`. Run `kitchen destroy ` to tear down resources for a specific test case. - -Alternatively, you can simply run `make test_integration_docker` to run all the test steps non-interactively. - -If you wish to parallelize running the test suites, it is also possible to offload the work onto Concourse to run each test suite for you using the command `make test_integration_concourse`. The `.concourse` directory will be created and contain all of the logs from the running test suites. - -When running tests locally, you will need to use your own test project environment. You can configure your environment by setting all of the following variables: - -``` -export COMPUTE_ENGINE_SERVICE_ACCOUNT="" -export PROJECT_ID="" -export REGION="" -export ZONES='[""]' -export SERVICE_ACCOUNT_JSON="$(cat "")" -export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" -export GOOGLE_APPLICATION_CREDENTIALS="" -``` - -#### Test configuration - -Each test-kitchen instance is configured with a `variables.tfvars` file in the test fixture directory, e.g. `test/fixtures/node_pool/terraform.tfvars`. -For convenience, since all of the variables are project-specific, these files have been symlinked to `test/fixtures/shared/terraform.tfvars`. -Similarly, each test fixture has a `variables.tf` to define these variables, and an `outputs.tf` to facilitate providing necessary information for `inspec` to locate and query against created resources. - -Each test-kitchen instance creates a GCP Network and Subnetwork fixture to house resources, and may create any other necessary fixture data as needed. - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Linting -The makefile in this project will lint or sometimes just format any shell, -Python, golang, Terraform, or Dockerfiles. The linters will only be run if -the makefile finds files with the appropriate file extension. - -All of the linter checks are in the default make target, so you just have to -run - -``` -make -s -``` - -The -s is for 'silent'. Successful output looks like this - -``` -Running shellcheck -Running flake8 -Running go fmt and go vet -Running terraform validate -Running hadolint on Dockerfiles -Checking for required files -Testing the validity of the header check -.. ----------------------------------------------------------------------- -Ran 2 tests in 0.026s - -OK -Checking file headers -The following lines have trailing whitespace -``` - -The linters -are as follows: -* Shell - shellcheck. Can be found in homebrew -* Python - flake8. Can be installed with 'pip install flake8' -* Golang - gofmt. gofmt comes with the standard golang installation. golang -is a compiled language so there is no standard linter. -* Terraform - terraform has a built-in linter in the 'terraform validate' -command. -* Dockerfiles - hadolint. 
Can be found in homebrew [upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md [upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md -[terraform-provider-google-beta]: https://github.com/terraform-providers/terraform-provider-google-beta +[terraform-provider-google]: https://github.com/terraform-providers/terraform-provider-google [3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 [terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/modules/private-cluster/auth.tf b/modules/private-cluster/auth.tf index c177eee5a7..48e7cc6a5f 100644 --- a/modules/private-cluster/auth.tf +++ b/modules/private-cluster/auth.tf @@ -20,7 +20,7 @@ Retrieve authentication token *****************************************/ data "google_client_config" "default" { - provider = google-beta + provider = google } /****************************************** diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index 6ea5394b02..412e8295ed 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -20,7 +20,7 @@ Create Container Cluster *****************************************/ resource "google_container_cluster" "primary" { - provider = google-beta + provider = google name = var.name description = var.description @@ -99,7 +99,7 @@ resource "google_container_cluster" "primary" { } lifecycle { - ignore_changes = [node_pool] + ignore_changes = [node_pool, initial_node_count] } timeouts { @@ -130,7 +130,7 @@ resource "google_container_cluster" "primary" { Create Container Cluster node pools *****************************************/ resource "google_container_node_pool" "pools" { - provider = google-beta + provider = google count = length(var.node_pools) name = var.node_pools[count.index]["name"] project = var.project_id @@ -147,9 +147,14 @@ resource "google_container_node_pool" "pools" { lookup(var.node_pools[count.index], "min_count", 1), ) - autoscaling { - min_node_count = lookup(var.node_pools[count.index], "min_count", 1) - max_node_count = lookup(var.node_pools[count.index], "max_count", 100) + node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1) + + dynamic "autoscaling" { + for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? 
[var.node_pools[count.index]] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } } management { @@ -183,17 +188,6 @@ resource "google_container_node_pool" "pools" { "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints }, ) - dynamic "taint" { - for_each = concat( - var.node_pools_taints["all"], - var.node_pools_taints[var.node_pools[count.index]["name"]], - ) - content { - effect = taint.value.effect - key = taint.value.key - value = taint.value.value - } - } tags = concat( ["gke-${var.name}"], ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], diff --git a/modules/private-cluster/main.tf b/modules/private-cluster/main.tf index 181bddfda7..3ce7084988 100644 --- a/modules/private-cluster/main.tf +++ b/modules/private-cluster/main.tf @@ -20,7 +20,7 @@ Get available zones in region *****************************************/ data "google_compute_zones" "available" { - provider = google-beta + provider = google project = var.project_id region = local.region diff --git a/modules/private-cluster/networks.tf b/modules/private-cluster/networks.tf index 2456654130..aae034eee5 100644 --- a/modules/private-cluster/networks.tf +++ b/modules/private-cluster/networks.tf @@ -17,14 +17,14 @@ // This file was automatically generated from a template in ./autogen data "google_compute_network" "gke_network" { - provider = google-beta + provider = google name = var.network project = local.network_project_id } data "google_compute_subnetwork" "gke_subnetwork" { - provider = google-beta + provider = google name = var.subnetwork region = local.region diff --git a/modules/private-cluster/variables.tf b/modules/private-cluster/variables.tf index 65edafd095..28b744d868 100644 --- a/modules/private-cluster/variables.tf +++ b/modules/private-cluster/variables.tf @@ -179,16 +179,6 @@ variable "node_pools_metadata" { } } -variable "node_pools_taints" { - type = map(list(object({ key = string, value = string, effect = string }))) - description = "Map of lists containing node taints by node-pool name" - - default = { - all = [] - default-node-pool = [] - } -} - variable "node_pools_tags" { type = map(list(string)) description = "Map of lists containing node network tags by node-pool name" diff --git a/test/.gitignore b/test/.gitignore new file mode 100644 index 0000000000..d69ba0d42f --- /dev/null +++ b/test/.gitignore @@ -0,0 +1 @@ +source.sh diff --git a/test/boilerplate/boilerplate.Dockerfile.txt b/test/boilerplate/boilerplate.Dockerfile.txt deleted file mode 100644 index b0c7da3d77..0000000000 --- a/test/boilerplate/boilerplate.Dockerfile.txt +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
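The node-pool change above is worth spelling out: when a pool's `autoscaling` key is set to `false`, the `dynamic "autoscaling"` block is skipped and `node_count` is pinned to `min_count`; otherwise `node_count` stays `null` so Terraform does not fight the autoscaler over the pool size. A minimal consumer-side sketch, assuming the v5.0.0 release of this module and placeholder project/network values:

```hcl
module "gke" {
  source  = "terraform-google-modules/kubernetes-engine/google"
  version = "~> 5.0"

  # Placeholder values; substitute your own project and network.
  project_id        = "my-project-id"
  name              = "fixed-size-cluster"
  region            = "us-central1"
  network           = "my-vpc"
  subnetwork        = "my-subnet"
  ip_range_pods     = "my-pods-range"
  ip_range_services = "my-services-range"

  node_pools = [
    {
      name         = "static-pool"
      machine_type = "n1-standard-2"
      autoscaling  = false # omits the autoscaling block entirely
      min_count    = 3     # with autoscaling disabled, this becomes node_count
    },
  ]
}
```

Leaving `node_count` as `null` whenever autoscaling is on is the design choice that avoids perpetual plan diffs as the autoscaler resizes the pool.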
diff --git a/test/boilerplate/boilerplate.Makefile.txt b/test/boilerplate/boilerplate.Makefile.txt deleted file mode 100644 index b0c7da3d77..0000000000 --- a/test/boilerplate/boilerplate.Makefile.txt +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/test/boilerplate/boilerplate.go.txt b/test/boilerplate/boilerplate.go.txt deleted file mode 100644 index 557e16f064..0000000000 --- a/test/boilerplate/boilerplate.go.txt +++ /dev/null @@ -1,15 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ diff --git a/test/boilerplate/boilerplate.py.txt b/test/boilerplate/boilerplate.py.txt deleted file mode 100644 index b0c7da3d77..0000000000 --- a/test/boilerplate/boilerplate.py.txt +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/test/boilerplate/boilerplate.sh.txt b/test/boilerplate/boilerplate.sh.txt deleted file mode 100644 index 2e94f3e551..0000000000 --- a/test/boilerplate/boilerplate.sh.txt +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
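Because `node_pools_taints` has been dropped from `modules/private-cluster` above, taint support now lives only in the beta submodules, alongside the new `sandbox_enabled` and `resource_usage_export_dataset_id` inputs. A hedged sketch of a `beta-public-cluster` configuration exercising all three; the dataset ID, pool name, and taint values are placeholders, and the `COS_CONTAINERD` / `node_version` requirements come from the `sandbox_enabled` variable description:

```hcl
module "gke_beta" {
  source = "terraform-google-modules/kubernetes-engine/google//modules/beta-public-cluster"

  # Required inputs, with placeholder values as in the previous sketch.
  project_id        = "my-project-id"
  name              = "beta-features-cluster"
  region            = "us-central1"
  network           = "my-vpc"
  subnetwork        = "my-subnet"
  ip_range_pods     = "my-pods-range"
  ip_range_services = "my-services-range"

  # Network egress metering into an existing BigQuery dataset (placeholder ID).
  resource_usage_export_dataset_id = "gke_usage_metering"

  # GKE Sandbox; per the variable description this also requires
  # COS_CONTAINERD nodes and node_version 1.12.7-gke.17 or later.
  sandbox_enabled = true
  node_version    = "1.12.7-gke.17"

  node_pools = [
    {
      name       = "sandbox-pool"
      image_type = "COS_CONTAINERD"
    },
  ]

  # Taints are keyed by pool name; the "all" key applies to every pool.
  node_pools_taints = {
    all = []

    sandbox-pool = [
      {
        key    = "dedicated"
        value  = "sandbox"
        effect = "NO_SCHEDULE"
      },
    ]
  }
}
```

Note that `node_pools_taints` needs an `all` key plus one key per pool name, since the taint block (removed from the private module above) concatenates `var.node_pools_taints["all"]` with the per-pool list.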
diff --git a/test/boilerplate/boilerplate.xml.txt b/test/boilerplate/boilerplate.xml.txt deleted file mode 100644 index 3d98cdc6e5..0000000000 --- a/test/boilerplate/boilerplate.xml.txt +++ /dev/null @@ -1,15 +0,0 @@ - diff --git a/test/boilerplate/boilerplate.yaml.txt b/test/boilerplate/boilerplate.yaml.txt deleted file mode 100644 index b0c7da3d77..0000000000 --- a/test/boilerplate/boilerplate.yaml.txt +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/test/ci/workload-metadata-config.yml b/test/ci/workload-metadata-config.yml index 23874671db..231c8dfc3a 100644 --- a/test/ci/workload-metadata-config.yml +++ b/test/ci/workload-metadata-config.yml @@ -15,4 +15,5 @@ params: SUITE: "workload-metadata-config-local" COMPUTE_ENGINE_SERVICE_ACCOUNT: "" REGION: "us-east4" - ZONES: '["us-east4-a", "us-east4-b", "us-east4-c"]' \ No newline at end of file + ZONES: '["us-east4-a", "us-east4-b", "us-east4-c"]' + diff --git a/test/ci_integration.sh b/test/ci_integration.sh deleted file mode 100755 index 365ed3862e..0000000000 --- a/test/ci_integration.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Always clean up. -DELETE_AT_EXIT="$(mktemp -d)" -finish() { - echo 'BEGIN: finish() trap handler' >&2 - kitchen destroy "$SUITE" - [[ -d "${DELETE_AT_EXIT}" ]] && rm -rf "${DELETE_AT_EXIT}" - echo 'END: finish() trap handler' >&2 -} - -# Map the input parameters provided by Concourse CI, or whatever mechanism is -# running the tests to Terraform input variables. Also setup credentials for -# use with kitchen-terraform, inspec, and gcloud. 
-setup_environment() { - local tmpfile - tmpfile="$(mktemp)" - echo "${SERVICE_ACCOUNT_JSON}" > "${tmpfile}" - - echo "${SERVICE_ACCOUNT_JSON}" > "test/fixtures/shared/credentials.json" - - # gcloud variables - export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="${tmpfile}" - # Application default credentials (Terraform google provider and inspec-gcp) - export GOOGLE_APPLICATION_CREDENTIALS="${tmpfile}" - - # Terraform variables - export TF_VAR_project_id="$PROJECT_ID" - export TF_VAR_credentials_path_relative="../shared/credentials.json" - export TF_VAR_region="$REGION" - export TF_VAR_zones="$ZONES" - export TF_VAR_compute_engine_service_account="$COMPUTE_ENGINE_SERVICE_ACCOUNT" -} - -main() { - export SUITE="${SUITE:-}" - - set -eu - # Setup trap handler to auto-cleanup - export TMPDIR="${DELETE_AT_EXIT}" - trap finish EXIT - - # Setup environment variables - setup_environment - set -x - - # Execute the test lifecycle - kitchen create "$SUITE" - kitchen converge "$SUITE" - kitchen verify "$SUITE" -} - -# if script is being executed and not sourced. -if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then - main "$@" -fi diff --git a/test/fixtures/deploy_service/terraform.tfvars b/test/fixtures/deploy_service/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/deploy_service/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/disable_client_cert/terraform.tfvars b/test/fixtures/disable_client_cert/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/disable_client_cert/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/node_pool/terraform.tfvars b/test/fixtures/node_pool/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/node_pool/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/shared/terraform.tfvars b/test/fixtures/shared/terraform.tfvars deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/fixtures/shared/terraform.tfvars.sample b/test/fixtures/shared/terraform.tfvars.sample deleted file mode 100644 index 3110e9b3d5..0000000000 --- a/test/fixtures/shared/terraform.tfvars.sample +++ /dev/null @@ -1,4 +0,0 @@ -project_id="" -region="us-east4" -zones=["us-east4-a","us-east4-b","us-east4-c"] -compute_engine_service_account="" diff --git a/test/fixtures/shared/variables.tf b/test/fixtures/shared/variables.tf index f8e3d6dfa4..76280e0065 100644 --- a/test/fixtures/shared/variables.tf +++ b/test/fixtures/shared/variables.tf @@ -20,12 +20,13 @@ variable "project_id" { variable "region" { description = "The GCP region to create and test resources in" + default = "us-east4" } variable "zones" { type = list(string) description = "The GCP zones to create and test resources in, for applicable tests" - default = [] + default = ["us-east4-a", "us-east4-b", "us-east4-c"] } variable "compute_engine_service_account" { diff --git a/test/fixtures/shared_vpc/terraform.tfvars b/test/fixtures/shared_vpc/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/shared_vpc/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/simple_regional/terraform.tfvars b/test/fixtures/simple_regional/terraform.tfvars deleted file mode 120000 index 
08ac6f4724..0000000000 --- a/test/fixtures/simple_regional/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/simple_regional_private/network.tf b/test/fixtures/simple_regional_private/network.tf index c50c2d12d1..f34f629069 100644 --- a/test/fixtures/simple_regional_private/network.tf +++ b/test/fixtures/simple_regional_private/network.tf @@ -20,10 +20,6 @@ resource "random_string" "suffix" { upper = false } -provider "google-beta" { - project = var.project_id -} - resource "google_compute_network" "main" { project = var.project_id name = "cft-gke-test-${random_string.suffix.result}" diff --git a/test/fixtures/simple_regional_private/terraform.tfvars b/test/fixtures/simple_regional_private/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/simple_regional_private/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/simple_zonal/terraform.tfvars b/test/fixtures/simple_zonal/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/simple_zonal/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/simple_zonal_private/network.tf b/test/fixtures/simple_zonal_private/network.tf index c50c2d12d1..76d33f6bfc 100644 --- a/test/fixtures/simple_zonal_private/network.tf +++ b/test/fixtures/simple_zonal_private/network.tf @@ -20,9 +20,6 @@ resource "random_string" "suffix" { upper = false } -provider "google-beta" { - project = var.project_id -} resource "google_compute_network" "main" { project = var.project_id diff --git a/test/fixtures/simple_zonal_private/terraform.tfvars b/test/fixtures/simple_zonal_private/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/simple_zonal_private/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/stub_domains/terraform.tfvars b/test/fixtures/stub_domains/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/stub_domains/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/stub_domains_private/terraform.tfvars b/test/fixtures/stub_domains_private/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/stub_domains_private/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/stub_domains_upstream_nameservers/terraform.tfvars b/test/fixtures/stub_domains_upstream_nameservers/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/stub_domains_upstream_nameservers/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/upstream_nameservers/terraform.tfvars b/test/fixtures/upstream_nameservers/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/upstream_nameservers/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/workload_metadata_config/terraform.tfvars b/test/fixtures/workload_metadata_config/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- 
a/test/fixtures/workload_metadata_config/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/integration/deploy_service/controls/gcloud.rb b/test/integration/deploy_service/controls/gcloud.rb index 2f8cfb2a38..fd72b9180b 100644 --- a/test/integration/deploy_service/controls/gcloud.rb +++ b/test/integration/deploy_service/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/deploy_service/controls/kubectl.rb b/test/integration/deploy_service/controls/kubectl.rb index 1443f94057..2d4a473d2c 100644 --- a/test/integration/deploy_service/controls/kubectl.rb +++ b/test/integration/deploy_service/controls/kubectl.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/disable_client_cert/controls/gcloud.rb b/test/integration/disable_client_cert/controls/gcloud.rb index c4739ffdaa..91d0c9df87 100644 --- a/test/integration/disable_client_cert/controls/gcloud.rb +++ b/test/integration/disable_client_cert/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/node_pool/controls/gcloud.rb b/test/integration/node_pool/controls/gcloud.rb index a9696c211a..6ff5fdd201 100644 --- a/test/integration/node_pool/controls/gcloud.rb +++ b/test/integration/node_pool/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/node_pool/controls/kubectl.rb b/test/integration/node_pool/controls/kubectl.rb index fb11abad17..471f9cb33f 100644 --- a/test/integration/node_pool/controls/kubectl.rb +++ b/test/integration/node_pool/controls/kubectl.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/shared_vpc/controls/gcloud.rb b/test/integration/shared_vpc/controls/gcloud.rb index 2f8cfb2a38..fd72b9180b 100644 --- a/test/integration/shared_vpc/controls/gcloud.rb +++ b/test/integration/shared_vpc/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/simple_regional/controls/gcloud.rb b/test/integration/simple_regional/controls/gcloud.rb index e3fba671b3..e6bbcfc047 100644 --- a/test/integration/simple_regional/controls/gcloud.rb +++ b/test/integration/simple_regional/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/simple_regional_private/controls/gcloud.rb b/test/integration/simple_regional_private/controls/gcloud.rb index f4df827813..b15dafcd02 100644 --- a/test/integration/simple_regional_private/controls/gcloud.rb +++ b/test/integration/simple_regional_private/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/simple_zonal/controls/gcloud.rb b/test/integration/simple_zonal/controls/gcloud.rb index cab5f8e4fd..c2e72936b0 100644 --- a/test/integration/simple_zonal/controls/gcloud.rb +++ b/test/integration/simple_zonal/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/simple_zonal/controls/gcp.rb b/test/integration/simple_zonal/controls/gcp.rb index 8e4cf6f96c..6e9ade64ff 100644 --- a/test/integration/simple_zonal/controls/gcp.rb +++ b/test/integration/simple_zonal/controls/gcp.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/simple_zonal/inspec.yml b/test/integration/simple_zonal/inspec.yml index 028e773638..5cb8ff9e01 100644 --- a/test/integration/simple_zonal/inspec.yml +++ b/test/integration/simple_zonal/inspec.yml @@ -27,4 +27,5 @@ attributes: type: string - name: service_account required: true - type: string \ No newline at end of file + type: string + diff --git a/test/integration/simple_zonal_private/controls/gcloud.rb b/test/integration/simple_zonal_private/controls/gcloud.rb index 2f808e136c..9968affcb6 100644 --- a/test/integration/simple_zonal_private/controls/gcloud.rb +++ b/test/integration/simple_zonal_private/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/stub_domains/controls/gcloud.rb b/test/integration/stub_domains/controls/gcloud.rb index 03612e151e..48072bb119 100644 --- a/test/integration/stub_domains/controls/gcloud.rb +++ b/test/integration/stub_domains/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/stub_domains/controls/kubectl.rb b/test/integration/stub_domains/controls/kubectl.rb index 1fa048e98d..1e53883a2d 100644 --- a/test/integration/stub_domains/controls/kubectl.rb +++ b/test/integration/stub_domains/controls/kubectl.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/stub_domains_private/controls/gcloud.rb b/test/integration/stub_domains_private/controls/gcloud.rb index 3356196754..f16ee7b401 100644 --- a/test/integration/stub_domains_private/controls/gcloud.rb +++ b/test/integration/stub_domains_private/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/stub_domains_private/controls/kubectl.rb b/test/integration/stub_domains_private/controls/kubectl.rb index e9a1bd7412..17502685d8 100644 --- a/test/integration/stub_domains_private/controls/kubectl.rb +++ b/test/integration/stub_domains_private/controls/kubectl.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/stub_domains_upstream_nameservers/controls/gcloud.rb b/test/integration/stub_domains_upstream_nameservers/controls/gcloud.rb index 03612e151e..48072bb119 100644 --- a/test/integration/stub_domains_upstream_nameservers/controls/gcloud.rb +++ b/test/integration/stub_domains_upstream_nameservers/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/stub_domains_upstream_nameservers/controls/kubectl.rb b/test/integration/stub_domains_upstream_nameservers/controls/kubectl.rb index 5223cbd2d4..8e8dfe086c 100644 --- a/test/integration/stub_domains_upstream_nameservers/controls/kubectl.rb +++ b/test/integration/stub_domains_upstream_nameservers/controls/kubectl.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/upstream_nameservers/controls/gcloud.rb b/test/integration/upstream_nameservers/controls/gcloud.rb index 03612e151e..48072bb119 100644 --- a/test/integration/upstream_nameservers/controls/gcloud.rb +++ b/test/integration/upstream_nameservers/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/upstream_nameservers/controls/kubectl.rb b/test/integration/upstream_nameservers/controls/kubectl.rb index 36612a02aa..21ec09c326 100644 --- a/test/integration/upstream_nameservers/controls/kubectl.rb +++ b/test/integration/upstream_nameservers/controls/kubectl.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/workload_metadata_config/controls/gcloud.rb b/test/integration/workload_metadata_config/controls/gcloud.rb index ea9c3627ce..e62606c78c 100644 --- a/test/integration/workload_metadata_config/controls/gcloud.rb +++ b/test/integration/workload_metadata_config/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/make.sh b/test/make.sh deleted file mode 100755 index ec1cd6b01d..0000000000 --- a/test/make.sh +++ /dev/null @@ -1,225 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Please note that this file was generated from [terraform-google-module-template](https://github.com/terraform-google-modules/terraform-google-module-template). -# Please make sure to contribute relevant changes upstream! - -# Create a temporary directory that's auto-cleaned, even if the process aborts. -DELETE_AT_EXIT="$(mktemp -d)" -finish() { - [[ -d "${DELETE_AT_EXIT}" ]] && rm -rf "${DELETE_AT_EXIT}" -} -trap finish EXIT -# Create a temporary file in the auto-cleaned up directory while avoiding -# overwriting TMPDIR for other processes. -# shellcheck disable=SC2120 -# (Arguments may be passed, e.g. maketemp -d) -maketemp() { - TMPDIR="${DELETE_AT_EXIT}" mktemp "$@" -} - -# find_files is a helper to exclude .git directories and match only regular -# files to avoid double-processing symlinks. -find_files() { - local pth="$1" - shift - find "${pth}" '(' \ - -path '*/.git' -o \ - -path '*/.terraform' -o \ - -path '*/.kitchen' -o \ - -path './autogen' -o \ - -path './test/fixtures/all_examples' -o \ - -path './test/fixtures/shared' ')' \ - -prune -o -type f "$@" -} - -# Compatibility with both GNU and BSD style xargs. -compat_xargs() { - local compat=() rval - # Test if xargs is GNU or BSD style. GNU xargs will succeed with status 0 - # when given --no-run-if-empty and no input on STDIN. BSD xargs will fail and - # exit status non-zero If xargs fails, assume it is BSD style and proceed. - # stderr is silently redirected to avoid console log spam. - if xargs --no-run-if-empty /dev/null; then - compat=("--no-run-if-empty") - fi - xargs "${compat[@]}" "$@" - rval="$?" - if [[ -z "${NOWARN:-}" ]] && [[ "${rval}" -gt 0 ]]; then - echo "Warning: compat_xargs $* failed with exit code ${rval}" >&2 - fi - return "${rval}" -} - -# This function makes sure that the required files for -# releasing to OSS are present -function basefiles() { - local fn required_files="LICENSE README.md" - echo "Checking for required files ${required_files}" - for fn in ${required_files}; do - test -f "${fn}" || echo "Missing required file ${fn}" - done -} - -# This function runs 'terraform validate' and 'terraform fmt' -# against all directory paths which contain *.tf files. -function check_terraform() { - local rval=125 - # fmt is before validate for faster feedback, validate requires terraform - # init which takes time. - echo "Running terraform fmt" - find_files . 
-name "*.tf" -print0 \ - | compat_xargs -0 -n1 dirname \ - | sort -u \ - | compat_xargs -t -n1 terraform fmt -diff -check=true -write=false - rval="$?" - if [[ "${rval}" -gt 0 ]]; then - echo "Error: terraform fmt failed with exit code ${rval}" >&2 - echo "Check the output for diffs and correct using terraform fmt " >&2 - return "${rval}" - fi - echo "Running terraform validate" - find_files . -not -path "./test/fixtures/shared/*" -name "*.tf" -print0 \ - | compat_xargs -0 -n1 dirname \ - | sort -u \ - | compat_xargs -t -n1 helpers/terraform_validate -} - -# This function runs 'go fmt' and 'go vet' on every file -# that ends in '.go' -function golang() { - echo "Running go fmt and go vet" - find_files . -name "*.go" -print0 | compat_xargs -0 -n1 go fmt - find_files . -name "*.go" -print0 | compat_xargs -0 -n1 go vet -} - -# This function runs the flake8 linter on every file -# ending in '.py' -function check_python() { - echo "Running flake8" - find_files . -name "*.py" -print0 | compat_xargs -0 flake8 - return 0 -} - -# This function runs the shellcheck linter on every -# file ending in '.sh' -function check_shell() { - echo "Running shellcheck" - find_files . -name "*.sh" -print0 | compat_xargs -0 shellcheck -x -} - -# This function makes sure that there is no trailing whitespace -# in any files in the project. -# There are some exclusions -function check_trailing_whitespace() { - local rc - echo "Checking for trailing whitespace" - find_files . -print \ - | grep -v -E '\.(pyc|png)$' \ - | NOWARN=1 compat_xargs grep -H -n '[[:blank:]]$' - rc=$? - if [[ ${rc} -eq 0 ]]; then - return 1 - fi -} - -function generate() { - pip3 install --user -r ./helpers/generate_modules/requirements.txt - ./helpers/generate_modules/generate_modules.py -} - -function generate_docs() { - echo "Generating markdown docs with terraform-docs" - local pth helper_dir rval - helper_dir="$(pwd)/helpers" - while read -r pth; do - if [[ -e "${pth}/README.md" ]]; then - (cd "${pth}" || return 3; "${helper_dir}"/terraform_docs .;) - rval="$?" - if [[ "${rval}" -gt 0 ]]; then - echo "Error: terraform_docs in ${pth} exit code: ${rval}" >&2 - return "${rval}" - fi - else - echo "Skipping ${pth} because README.md does not exist." - fi - done < <(find_files . -name '*.tf' -print0 \ - | compat_xargs -0 -n1 dirname \ - | sort -u) -} - -function check_generate() { - TMPDIR=$(mktemp -d) - git worktree add --detach "$TMPDIR" >/dev/null - cd "$TMPDIR" || exit 1 - - generate >/dev/null - generate_docs >/dev/null - - git diff --stat --exit-code >/dev/null - rc=$? - cd - >/dev/null || exit 1 - - if [[ $rc -ne 0 ]]; then - echo '"make generate" creates a diff, run "make generate" and commit the results' - fi - rm -rf "$TMPDIR" - git worktree prune >/dev/null - - echo "Code was generated properly" - - exit $rc -} - -function check_generate_docs() { - TMPDIR=$(mktemp -d) - git worktree add --detach "$TMPDIR" >/dev/null - cd "$TMPDIR" || exit 1 - - generate_docs >/dev/null - git diff --stat --exit-code >/dev/null - rc=$? - cd - >/dev/null || exit 1 - - if [[ $rc -ne 0 ]]; then - echo '"make generate_docs" creates a diff, run "make generate_docs" and commit the results' - fi - rm -rf "$TMPDIR" - git worktree prune >/dev/null - - echo "Docs were generated properly" - - exit $rc -} - -function prepare_test_variables() { - echo "Preparing terraform.tfvars files for integration tests" - #shellcheck disable=2044 - for i in $(find ./test/fixtures -type f -name terraform.tfvars.sample); do - destination=${i/%.sample/} - if [ ! 
-      cp "${i}" "${destination}"
-      echo "${destination} has been created. Please edit it to reflect your GCP configuration."
-    fi
-  done
-}
-
-function check_headers() {
-  echo "Checking file headers"
-  # Use the exclusion behavior of find_files
-  find_files . -type f -print0 \
-    | compat_xargs -0 python test/verify_boilerplate.py
-}
diff --git a/test/setup/.gitignore b/test/setup/.gitignore
new file mode 100644
index 0000000000..0e515f83d2
--- /dev/null
+++ b/test/setup/.gitignore
@@ -0,0 +1,2 @@
+terraform.tfvars
+source.sh
diff --git a/test/setup/iam.tf b/test/setup/iam.tf
new file mode 100644
index 0000000000..29facd32a9
--- /dev/null
+++ b/test/setup/iam.tf
@@ -0,0 +1,58 @@
+/**
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+locals {
+  int_required_roles = [
+    "roles/cloudkms.cryptoKeyEncrypterDecrypter",
+    "roles/compute.networkAdmin",
+    "roles/container.clusterAdmin",
+    "roles/container.developer",
+    "roles/iam.serviceAccountAdmin",
+    "roles/iam.serviceAccountUser",
+    "roles/compute.networkAdmin",
+    "roles/compute.viewer",
+    "roles/resourcemanager.projectIamAdmin"
+  ]
+}
+
+
+resource "random_id" "random_suffix" {
+  byte_length = 2
+}
+
+resource "google_service_account" "int_test" {
+  project      = module.gke-project.project_id
+  account_id   = "gke-int-test-${random_id.random_suffix.hex}"
+  display_name = "gke-int-test"
+}
+
+resource "google_service_account" "gke_sa" {
+  project      = module.gke-project.project_id
+  account_id   = "gke-sa-int-test-${random_id.random_suffix.hex}"
+  display_name = "gke-sa-int-test"
+}
+
+resource "google_project_iam_member" "int_test" {
+  count = length(local.int_required_roles)
+
+  project = module.gke-project.project_id
+  role    = local.int_required_roles[count.index]
+  member  = "serviceAccount:${google_service_account.int_test.email}"
+}
+
+resource "google_service_account_key" "int_test" {
+  service_account_id = google_service_account.int_test.id
+}
diff --git a/test/setup/main.tf b/test/setup/main.tf
new file mode 100644
index 0000000000..f974c7408e
--- /dev/null
+++ b/test/setup/main.tf
@@ -0,0 +1,41 @@
+/**
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+module "gke-project" {
+  source  = "terraform-google-modules/project-factory/google"
+  version = "~> 3.0"
+
+  name              = "ci-gke"
+  random_project_id = true
+  org_id            = var.org_id
+  folder_id         = var.folder_id
+  billing_account   = var.billing_account
+
+  activate_apis = [
+    "bigquery-json.googleapis.com",
+    "cloudkms.googleapis.com",
+    "cloudresourcemanager.googleapis.com",
+    "compute.googleapis.com",
+    "container.googleapis.com",
+    "containerregistry.googleapis.com",
+    "iam.googleapis.com",
+    "iamcredentials.googleapis.com",
+    "oslogin.googleapis.com",
+    "pubsub.googleapis.com",
+    "serviceusage.googleapis.com",
+    "storage-api.googleapis.com",
+  ]
+}
diff --git a/Gemfile b/test/setup/make_source.sh
old mode 100644
new mode 100755
similarity index 50%
rename from Gemfile
rename to test/setup/make_source.sh
index a54d14ec29..b39944af41
--- a/Gemfile
+++ b/test/setup/make_source.sh
@@ -1,4 +1,6 @@
-# Copyright 2018 Google LLC
+#!/usr/bin/env bash
+
+# Copyright 2019 Google LLC
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,10 +14,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-ruby "~> 2.5"
+echo "#!/usr/bin/env bash" > ../source.sh
+
+project_id=$(terraform output project_id)
+echo "export TF_VAR_project_id='$project_id'" >> ../source.sh
+
+sa_json=$(terraform output sa_key)
+# shellcheck disable=SC2086
+echo "export SERVICE_ACCOUNT_JSON='$(echo $sa_json | base64 --decode)'" >> ../source.sh
 
-source 'https://rubygems.org/' do
-  gem "kitchen-terraform", "~> 4.9"
-  gem "kubeclient", "~> 4.0"
-  gem "rest-client", "~> 2.0"
-end
+compute_engine_service_account=$(terraform output compute_engine_service_account)
+echo "export TF_VAR_compute_engine_service_account='$compute_engine_service_account'" >> ../source.sh
diff --git a/test/setup/outputs.tf b/test/setup/outputs.tf
new file mode 100644
index 0000000000..3e508ed1c7
--- /dev/null
+++ b/test/setup/outputs.tf
@@ -0,0 +1,28 @@
+/**
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+output "project_id" {
+  value = module.gke-project.project_id
+}
+
+output "sa_key" {
+  value     = google_service_account_key.int_test.private_key
+  sensitive = true
+}
+
+output "compute_engine_service_account" {
+  value = google_service_account.gke_sa.email
+}
diff --git a/test/setup/variables.tf b/test/setup/variables.tf
new file mode 100644
index 0000000000..6d80b89896
--- /dev/null
+++ b/test/setup/variables.tf
@@ -0,0 +1,26 @@
+/**
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+variable "org_id" {
+  description = "The numeric organization id"
+}
+
+variable "folder_id" {
+  description = "The folder to deploy in"
+}
+
+variable "billing_account" {
+  description = "The billing account id associated with the project, e.g. XXXXXX-YYYYYY-ZZZZZZ"
+}
diff --git a/test/boilerplate/boilerplate.tf.txt b/test/setup/versions.tf
similarity index 76%
rename from test/boilerplate/boilerplate.tf.txt
rename to test/setup/versions.tf
index cfccff84ca..efbd8ea517 100644
--- a/test/boilerplate/boilerplate.tf.txt
+++ b/test/setup/versions.tf
@@ -1,5 +1,5 @@
 /**
- * Copyright 2018 Google LLC
+ * Copyright 2019 Google LLC
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -13,3 +13,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+terraform {
+  required_version = ">= 0.12"
+}
+
+provider "google" {
+  version = "~> 2.13.0"
+}
+
+provider "google-beta" {
+  version = "~> 2.13.0"
+}
diff --git a/test/task_helper_functions.sh b/test/task_helper_functions.sh
new file mode 100755
index 0000000000..70ab3db5c8
--- /dev/null
+++ b/test/task_helper_functions.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+function generate() {
+  pip3 install --user -r /workspace/helpers/generate_modules/requirements.txt
+  /workspace/helpers/generate_modules/generate_modules.py
+}
+
+# Changed from using git-diff to avoid errors on CI:
+# fatal: not a git repository (or any parent up to mount point /)
+function check_generate() {
+  local tempdir rval rc
+  setup_trap_handler
+  tempdir=$(mktemp -d)
+  rval=0
+  echo "Checking submodule file generation"
+  rsync -axh \
+    --exclude '*/.terraform' \
+    --exclude '*/.kitchen' \
+    --exclude '*/.git' \
+    /workspace "${tempdir}" >/dev/null 2>/dev/null
+  cd "${tempdir}" || exit 1
+  generate >/dev/null 2>/dev/null
+  diff -r \
+    --exclude=".terraform" \
+    --exclude=".kitchen" \
+    --exclude=".git" \
+    /workspace "${tempdir}/workspace"
+  rc=$?
+  if [[ "${rc}" -ne 0 ]]; then
+    echo "Error: submodule file generation has not been run, please run the"
+    echo "'source /workspace/helpers/generate.sh && generate' commands and commit the above changes."
+    ((rval++))
+  fi
+  cd /workspace || exit 1
+  rm -Rf "${tempdir}"
+  return $((rval))
+}
+
+find_files() {
+  local pth="$1"
+  shift
+  find "${pth}" '(' \
+    -path '*/.git' -o \
+    -path '*/.terraform' -o \
+    -path '*/.kitchen' -o \
+    -path './autogen' -o \
+    -path './test/fixtures/all_examples' -o \
+    -path './test/fixtures/shared' ')' \
+    -prune -o -type f "$@"
+}
diff --git a/test/test_verify_boilerplate.py b/test/test_verify_boilerplate.py
deleted file mode 100755
index 22a3cca055..0000000000
--- a/test/test_verify_boilerplate.py
+++ /dev/null
@@ -1,136 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-''' A simple test for the verify_boilerplate python script.
-This will create a set of test files, both valid and invalid,
-and confirm that the has_valid_header call returns the correct
-value.
-
-It also checks the number of files that are found by the
-get_files call.
-'''
-from copy import deepcopy
-from tempfile import mkdtemp
-from shutil import rmtree
-import unittest
-from verify_boilerplate import has_valid_header, get_refs, get_regexs, \
-    get_args, get_files
-
-
-class AllTestCase(unittest.TestCase):
-    """
-    All of the setup, teardown, and tests are contained in this
-    class.
-    """
-
-    def write_file(self, filename, content, expected):
-        """
-        A utility method that creates test files, and adds them to
-        the cases that will be tested.
-
-        Args:
-            filename: (string) the file name (path) to be created.
-            content: (list of strings) the contents of the file.
-            expected: (boolean) True if the header is expected to be valid,
-            false if not.
-        """
-
-        file = open(filename, 'w+')
-        for line in content:
-            file.write(line + "\n")
-        file.close()
-        self.cases[filename] = expected
-
-    def create_test_files(self, tmp_path, extension, header):
-        """
-        Creates 2 test files for .tf, .xml, .go, etc and one for
-        Dockerfile, and Makefile.
-
-        The reason for the difference is that Makefile and Dockerfile
-        don't have an extension. These would be substantially more
-        difficult to create negative test cases, unless the files
-        were written, deleted, and re-written.
-
-        Args:
-            tmp_path: (string) the path in which to create the files
-            extension: (string) the file extension
-            header: (list of strings) the header/boilerplate content
-        """
-
-        content = "\n...blah \ncould be code or could be garbage\n"
-        special_cases = ["Dockerfile", "Makefile"]
-        header_template = deepcopy(header)
-        valid_filename = tmp_path + extension
-        valid_content = header_template.append(content)
-        if extension not in special_cases:
-            # Invalid test cases for non-*file files (.tf|.py|.sh|.yaml|.xml..)
-            invalid_header = []
-            for line in header_template:
-                if "2018" in line:
-                    invalid_header.append(line.replace('2018', 'YEAR'))
-                else:
-                    invalid_header.append(line)
-            invalid_header.append(content)
-            invalid_content = invalid_header
-            invalid_filename = tmp_path + "invalid." + extension
-            self.write_file(invalid_filename, invalid_content, False)
-        valid_filename = tmp_path + "testfile." + extension
-
-        valid_content = header_template
-        self.write_file(valid_filename, valid_content, True)
-
-    def setUp(self):
-        """
-        Set initial counts and values, and initializes the setup of the
-        test files.
-        """
-        self.cases = {}
-        self.tmp_path = mkdtemp() + "/"
-        self.my_args = get_args()
-        self.my_refs = get_refs(self.my_args)
-        self.my_regex = get_regexs()
-        self.prexisting_file_count = len(
-            get_files(self.my_refs.keys(), self.my_args))
-        for key in self.my_refs:
-            self.create_test_files(self.tmp_path, key,
-                                   self.my_refs.get(key))
-
-    def tearDown(self):
-        """ Delete the test directory. """
-        rmtree(self.tmp_path)
-
-    def test_files_headers(self):
-        """
-        Confirms that the expected output of has_valid_header is correct.
-        """
-        for case in self.cases:
-            if self.cases[case]:
-                self.assertTrue(has_valid_header(case, self.my_refs,
-                                                 self.my_regex))
-            else:
-                self.assertFalse(has_valid_header(case, self.my_refs,
-                                                  self.my_regex))
-
-    def test_invalid_count(self):
-        """
-        Test that the initial files found isn't zero, indicating
-        a problem with the code.
-        """
-        self.assertFalse(self.prexisting_file_count == 0)
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/test/verify_boilerplate.py b/test/verify_boilerplate.py
deleted file mode 100644
index a632fdedcc..0000000000
--- a/test/verify_boilerplate.py
+++ /dev/null
@@ -1,279 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# Verifies that all source files contain the necessary copyright boilerplate
-# snippet.
-# This is based on existing work
-# https://github.com/kubernetes/test-infra/blob/master/hack
-# /verify_boilerplate.py
-from __future__ import print_function
-import argparse
-import glob
-import os
-import re
-import sys
-
-
-def get_args():
-    """Parses command line arguments.
-
-    Configures and runs argparse.ArgumentParser to extract command line
-    arguments.
-
-    Returns:
-        An argparse.Namespace containing the arguments parsed from the
-        command line
-    """
-    parser = argparse.ArgumentParser()
-    parser.add_argument("filenames",
-                        help="list of files to check, "
-                             "all files if unspecified",
-                        nargs='*')
-    rootdir = os.path.dirname(__file__) + "/../"
-    rootdir = os.path.abspath(rootdir)
-    parser.add_argument(
-        "--rootdir",
-        default=rootdir,
-        help="root directory to examine")
-
-    default_boilerplate_dir = os.path.join(rootdir, "test/boilerplate")
-    parser.add_argument("--boilerplate-dir", default=default_boilerplate_dir)
-    return parser.parse_args()
-
-
-def get_refs(ARGS):
-    """Converts the directory of boilerplate files into a map keyed by file
-    extension.
-
-    Reads each boilerplate file's contents into an array, then adds that array
-    to a map keyed by the file extension.
-
-    Returns:
-        A map of boilerplate lines, keyed by file extension. For example,
-        boilerplate.py.txt would result in the k,v pair {".py": py_lines} where
-        py_lines is an array containing each line of the file.
-    """
-    refs = {}
-
-    # Find and iterate over the absolute path for each boilerplate template
-    for path in glob.glob(os.path.join(
-            ARGS.boilerplate_dir,
-            "boilerplate.*.txt")):
-        extension = os.path.basename(path).split(".")[1]
-        ref_file = open(path, 'r')
-        ref = ref_file.read().splitlines()
-        ref_file.close()
-        refs[extension] = ref
-    return refs
-
-
-# pylint: disable=too-many-locals
-def has_valid_header(filename, refs, regexs):
-    """Test whether a file has the correct boilerplate header.
-
-    Tests each file against the boilerplate stored in refs for that file type
-    (based on extension), or by the entire filename (eg Dockerfile, Makefile).
-    Some heuristics are applied to remove build tags and shebangs, but little
-    variance in header formatting is tolerated.
-
-    Args:
-        filename: A string containing the name of the file to test
-        refs: A map of boilerplate headers, keyed by file extension
-        regexs: a map of compiled regex objects used in verifying boilerplate
-
-    Returns:
-        True if the file has the correct boilerplate header, otherwise returns
-        False.
-    """
-    try:
-        with open(filename, 'r') as fp:  # pylint: disable=invalid-name
-            data = fp.read()
-    except IOError:
-        return False
-    basename = os.path.basename(filename)
-    extension = get_file_extension(filename)
-    if extension:
-        ref = refs[extension]
-    else:
-        ref = refs[basename]
-    # remove build tags from the top of Go files
-    if extension == "go":
-        con = regexs["go_build_constraints"]
-        (data, found) = con.subn("", data, 1)
-    # remove shebang
-    elif extension == "sh" or extension == "py":
-        she = regexs["shebang"]
-        (data, found) = she.subn("", data, 1)
-    data = data.splitlines()
-    # if our test file is smaller than the reference it surely fails!
-    if len(ref) > len(data):
-        return False
-    # trim our file to the same number of lines as the reference file
-    data = data[:len(ref)]
-    year = regexs["year"]
-    for datum in data:
-        if year.search(datum):
-            return False
-
-    # if we don't match the reference at this point, fail
-    if ref != data:
-        return False
-    return True
-
-
-def get_file_extension(filename):
-    """Extracts the extension part of a filename.
-
-    Identifies the extension as everything after the last period in filename.
-
-    Args:
-        filename: string containing the filename
-
-    Returns:
-        A string containing the extension in lowercase
-    """
-    return os.path.splitext(filename)[1].split(".")[-1].lower()
-
-
-# These directories will be omitted from header checks
-SKIPPED_DIRS = [
-    'Godeps', 'third_party', '_gopath', '_output',
-    '.git', 'vendor', '__init__.py', 'node_modules'
-]
-
-
-def normalize_files(files):
-    """Extracts the files that require boilerplate checking from the files
-    argument.
-
-    A new list will be built. Each path from the original files argument will
-    be added unless it is within one of SKIPPED_DIRS. All relative paths will
-    be converted to absolute paths by prepending the root_dir path parsed from
-    the command line, or its default value.
-
-    Args:
-        files: a list of file path strings
-
-    Returns:
-        A modified copy of the files list where any any path in a skipped
-        directory is removed, and all paths have been made absolute.
-    """
-    newfiles = []
-    for pathname in files:
-        if any(x in pathname for x in SKIPPED_DIRS):
-            continue
-        newfiles.append(pathname)
-    for idx, pathname in enumerate(newfiles):
-        if not os.path.isabs(pathname):
-            newfiles[idx] = os.path.join(ARGS.rootdir, pathname)
-    return newfiles
-
-
-def get_files(extensions, ARGS):
-    """Generates a list of paths whose boilerplate should be verified.
-
-    If a list of file names has been provided on the command line, it will be
-    treated as the initial set to search. Otherwise, all paths within rootdir
-    will be discovered and used as the initial set.
-
-    Once the initial set of files is identified, it is normalized via
-    normalize_files() and further stripped of any file name whose extension is
-    not in extensions.
-
-    Args:
-        extensions: a list of file extensions indicating which file types
-        should have their boilerplate verified
-
-    Returns:
-        A list of absolute file paths
-    """
-    files = []
-    if ARGS.filenames:
-        files = ARGS.filenames
-    else:
-        for root, dirs, walkfiles in os.walk(ARGS.rootdir):
-            # don't visit certain dirs. This is just a performance improvement
-            # as we would prune these later in normalize_files(). But doing it
-            # cuts down the amount of filesystem walking we do and cuts down
-            # the size of the file list
-            for dpath in SKIPPED_DIRS:
-                if dpath in dirs:
-                    dirs.remove(dpath)
-            for name in walkfiles:
-                pathname = os.path.join(root, name)
-                files.append(pathname)
-    files = normalize_files(files)
-    outfiles = []
-    for pathname in files:
-        basename = os.path.basename(pathname)
-        extension = get_file_extension(pathname)
-        if extension in extensions or basename in extensions:
-            outfiles.append(pathname)
-    return outfiles
-
-
-def get_regexs():
-    """Builds a map of regular expressions used in boilerplate validation.
-
-    There are two scenarios where these regexes are used. The first is in
-    validating the date referenced is the boilerplate, by ensuring it is an
-    acceptable year. The second is in identifying non-boilerplate elements,
-    like shebangs and compiler hints that should be ignored when validating
-    headers.
-
-    Returns:
-        A map of compiled regular expression objects, keyed by mnemonic.
-    """
-    regexs = {}
-    # Search for "YEAR" which exists in the boilerplate, but shouldn't in the
-    # real thing
-    regexs["year"] = re.compile('YEAR')
-    # dates can be 2014, 2015, 2016 or 2017, company holder names can be
-    # anything
-    regexs["date"] = re.compile('(2014|2015|2016|2017|2018)')
-    # strip // +build \n\n build constraints
-    regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n",
-                                                re.MULTILINE)
-    # strip #!.* from shell/python scripts
-    regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
-    return regexs
-
-
-def main(args):
-    """Identifies and verifies files that should have the desired boilerplate.
-
-    Retrieves the lists of files to be validated and tests each one in turn.
-    If all files contain correct boilerplate, this function terminates
-    normally. Otherwise it prints the name of each non-conforming file and
-    exists with a non-zero status code.
-    """
-    regexs = get_regexs()
-    refs = get_refs(args)
-    filenames = get_files(refs.keys(), args)
-    nonconforming_files = []
-    for filename in filenames:
-        if not has_valid_header(filename, refs, regexs):
-            nonconforming_files.append(filename)
-    if nonconforming_files:
-        print('%d files have incorrect boilerplate headers:' % len(
-            nonconforming_files))
-        for filename in sorted(nonconforming_files):
-            print(os.path.relpath(filename, args.rootdir))
-        sys.exit(1)
-
-
-if __name__ == "__main__":
-    ARGS = get_args()
-    main(ARGS)
diff --git a/variables.tf b/variables.tf
index f74396e29e..5edd4b3405 100644
--- a/variables.tf
+++ b/variables.tf
@@ -179,16 +179,6 @@ variable "node_pools_metadata" {
   }
 }
 
-variable "node_pools_taints" {
-  type        = map(list(object({ key = string, value = string, effect = string })))
-  description = "Map of lists containing node taints by node-pool name"
-
-  default = {
-    all               = []
-    default-node-pool = []
-  }
-}
-
 variable "node_pools_tags" {
   type        = map(list(string))
   description = "Map of lists containing node network tags by node-pool name"